specfact-cli 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62) hide show
  1. specfact_cli/__init__.py +14 -0
  2. specfact_cli/agents/__init__.py +24 -0
  3. specfact_cli/agents/analyze_agent.py +392 -0
  4. specfact_cli/agents/base.py +95 -0
  5. specfact_cli/agents/plan_agent.py +202 -0
  6. specfact_cli/agents/registry.py +176 -0
  7. specfact_cli/agents/sync_agent.py +133 -0
  8. specfact_cli/analyzers/__init__.py +11 -0
  9. specfact_cli/analyzers/code_analyzer.py +796 -0
  10. specfact_cli/cli.py +396 -0
  11. specfact_cli/commands/__init__.py +7 -0
  12. specfact_cli/commands/enforce.py +88 -0
  13. specfact_cli/commands/import_cmd.py +365 -0
  14. specfact_cli/commands/init.py +125 -0
  15. specfact_cli/commands/plan.py +1089 -0
  16. specfact_cli/commands/repro.py +192 -0
  17. specfact_cli/commands/sync.py +408 -0
  18. specfact_cli/common/__init__.py +25 -0
  19. specfact_cli/common/logger_setup.py +654 -0
  20. specfact_cli/common/logging_utils.py +41 -0
  21. specfact_cli/common/text_utils.py +52 -0
  22. specfact_cli/common/utils.py +48 -0
  23. specfact_cli/comparators/__init__.py +11 -0
  24. specfact_cli/comparators/plan_comparator.py +391 -0
  25. specfact_cli/generators/__init__.py +14 -0
  26. specfact_cli/generators/plan_generator.py +105 -0
  27. specfact_cli/generators/protocol_generator.py +115 -0
  28. specfact_cli/generators/report_generator.py +200 -0
  29. specfact_cli/generators/workflow_generator.py +120 -0
  30. specfact_cli/importers/__init__.py +7 -0
  31. specfact_cli/importers/speckit_converter.py +773 -0
  32. specfact_cli/importers/speckit_scanner.py +711 -0
  33. specfact_cli/models/__init__.py +33 -0
  34. specfact_cli/models/deviation.py +105 -0
  35. specfact_cli/models/enforcement.py +150 -0
  36. specfact_cli/models/plan.py +97 -0
  37. specfact_cli/models/protocol.py +28 -0
  38. specfact_cli/modes/__init__.py +19 -0
  39. specfact_cli/modes/detector.py +126 -0
  40. specfact_cli/modes/router.py +153 -0
  41. specfact_cli/resources/semgrep/async.yml +285 -0
  42. specfact_cli/sync/__init__.py +12 -0
  43. specfact_cli/sync/repository_sync.py +279 -0
  44. specfact_cli/sync/speckit_sync.py +388 -0
  45. specfact_cli/utils/__init__.py +58 -0
  46. specfact_cli/utils/console.py +70 -0
  47. specfact_cli/utils/feature_keys.py +212 -0
  48. specfact_cli/utils/git.py +241 -0
  49. specfact_cli/utils/github_annotations.py +399 -0
  50. specfact_cli/utils/ide_setup.py +382 -0
  51. specfact_cli/utils/prompts.py +180 -0
  52. specfact_cli/utils/structure.py +497 -0
  53. specfact_cli/utils/yaml_utils.py +200 -0
  54. specfact_cli/validators/__init__.py +20 -0
  55. specfact_cli/validators/fsm.py +262 -0
  56. specfact_cli/validators/repro_checker.py +759 -0
  57. specfact_cli/validators/schema.py +196 -0
  58. specfact_cli-0.4.2.dist-info/METADATA +370 -0
  59. specfact_cli-0.4.2.dist-info/RECORD +62 -0
  60. specfact_cli-0.4.2.dist-info/WHEEL +4 -0
  61. specfact_cli-0.4.2.dist-info/entry_points.txt +2 -0
  62. specfact_cli-0.4.2.dist-info/licenses/LICENSE.md +61 -0
@@ -0,0 +1,654 @@
1
+ """
2
+ Logging utility for standardized log setup across all modules
3
+ """
4
+
5
+ import atexit
6
+ import contextlib
7
+ import logging
8
+ import os
9
+ import re
10
+ import sys
11
+ from logging.handlers import QueueHandler, QueueListener, RotatingFileHandler
12
+ from queue import Queue
13
+ from typing import Any, Literal
14
+
15
+ from beartype import beartype
16
+ from icontract import ensure, require
17
+
18
+
19
# Add TRACE level (5) - more detailed than DEBUG (10).
# Exposed to callers via LoggerSetup.trace() and the per-logger `.trace` shim.
logging.addLevelName(5, "TRACE")

# Circular dependency protection flag
# Note: Platform base infrastructure removed for lean CLI
# The logger setup is now standalone without agent-system dependencies
25
+
26
+
27
@beartype
@ensure(lambda result: isinstance(result, str) and len(result) > 0, "Must return non-empty string path")
def get_runtime_logs_dir() -> str:
    """
    Get the path to the centralized runtime logs directory and ensure it exists.

    This function is designed to be safe to call from anywhere, including
    module-level initializers, by guaranteeing the log directory's existence.

    Returns:
        str: Path to the runtime logs directory.
    """
    # Determine the base path based on the environment.
    if os.path.exists("/.dockerenv"):
        # Docker container: write to /app/logs
        base_logs_dir = "/app/logs"
    else:
        # Non-Docker (local): <repo_root>/logs, derived from this file's location
        # (common/ -> src package dir -> repo root).
        current_dir = os.path.dirname(os.path.abspath(__file__))
        src_dir = os.path.dirname(current_dir)
        repo_root = os.path.dirname(src_dir)
        base_logs_dir = os.path.join(repo_root, "logs")

    runtime_logs_dir = os.path.join(base_logs_dir, "runtime")

    # Collapse an accidentally duplicated trailing 'runtime/runtime' segment.
    # A single absolute-path substitution covers both the relative and absolute
    # forms; the original performed the same replacement twice.
    runtime_logs_dir = os.path.abspath(runtime_logs_dir).replace(
        f"{os.path.sep}runtime{os.path.sep}runtime", f"{os.path.sep}runtime"
    )

    # Ensure directory exists. Use 0o777 intentionally for cross-platform writability,
    # especially under container mounts and CI sandboxes. This is an explicitly justified
    # exception to repo rule #7; tests rely on this mode for deterministic behavior.
    try:
        os.makedirs(runtime_logs_dir, mode=0o777, exist_ok=True)
    except PermissionError:
        # Try workspace and CWD fallbacks, directly creating the runtime directory.
        # NOTE(review): if every fallback also raises PermissionError, the last
        # attempted (non-existent) path is returned silently — confirm callers
        # tolerate that, as the original code behaved the same way.
        for fallback_root in [os.environ.get("WORKSPACE", "/workspace"), os.getcwd()]:
            try:
                runtime_logs_dir = os.path.join(fallback_root, "logs", "runtime")
                os.makedirs(runtime_logs_dir, mode=0o777, exist_ok=True)
                break
            except PermissionError:
                continue

    return runtime_logs_dir
79
+
80
+
81
class MessageFlowFormatter(logging.Formatter):
    """
    Custom formatter that recognizes message flow patterns and formats them accordingly.

    Messages shaped like ``sender => receiver | text`` are rewritten so the
    receiver leads the line; everything else is prefixed with the agent name,
    timestamp, optional session id and level name.
    """

    # Pattern to match "sender => receiver | message" format
    FLOW_PATTERN = re.compile(r"^(\w+) => (\w+) \| (.*)$")

    # Pattern to match already formatted messages (both standard and flow formats)
    # This includes timestamp pattern \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}
    # and agent | timestamp format
    ALREADY_FORMATTED_PATTERN = re.compile(
        r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}|^\w+ \| \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})"
    )

    @beartype
    @require(
        lambda agent_name: isinstance(agent_name, str) and len(agent_name) > 0, "Agent name must be non-empty string"
    )
    def __init__(
        self,
        agent_name: str,
        fmt: str | None = None,
        datefmt: str | None = None,
        style: Literal["%", "{", "$"] = "%",
        session_id: str | None = None,
        preserve_newlines: bool = True,
    ) -> None:
        """
        Initialize the formatter with the agent name

        Args:
            agent_name: Name of the agent (used when no flow information is in the message)
            fmt: Format string
            datefmt: Date format string
            style: Style of format string
            session_id: Optional unique session ID to include in log messages
            preserve_newlines: Whether to preserve newlines in the original message
        """
        super().__init__(fmt, datefmt, style)
        self.agent_name = agent_name
        self.session_id = session_id
        self.preserve_newlines = preserve_newlines

    @beartype
    @require(lambda record: isinstance(record, logging.LogRecord), "Record must be LogRecord instance")
    @ensure(lambda result: isinstance(result, str), "Must return string")
    def format(self, record: logging.LogRecord) -> str:
        """
        Format the log record according to message flow patterns

        Args:
            record: The log record to format

        Returns:
            Formatted log string
        """
        # Extract the message
        original_message = record.getMessage()

        # Special case for test summary format (always preserve exact format)
        if "Test Summary:" in original_message or "===" in original_message:
            # Special case for test analyzer compatibility - don't prepend anything
            return original_message

        # Guard against already formatted messages to prevent recursive formatting.
        # Needed because this formatter overwrites record.msg below, so a record
        # passed through a second handler would otherwise be double-prefixed.
        if self.ALREADY_FORMATTED_PATTERN.search(original_message):
            # Log message is already formatted, return as is
            return original_message

        # Check if this is a message flow log ("sender => receiver | text")
        flow_match = self.FLOW_PATTERN.match(original_message)
        if flow_match:
            sender, receiver, message = flow_match.groups()

            # Format the timestamp
            timestamp = self.formatTime(record, self.datefmt)

            # Format the message with flow information and session ID if available
            if self.session_id:
                formatted_message = (
                    f"{receiver} | {timestamp} | {self.session_id} | "
                    f"{record.levelname} | {sender} => {receiver} | {message}"
                )
            else:
                formatted_message = (
                    f"{receiver} | {timestamp} | {record.levelname} | {sender} => {receiver} | {message}"
                )

            # Override the message in the record so downstream handlers see the
            # final text (args cleared to avoid re-interpolation).
            record.msg = formatted_message
            record.args = ()

            # Return the formatted message directly
            return formatted_message
        # Standard formatting for non-flow messages
        timestamp = self.formatTime(record, self.datefmt)

        # Handle multiline messages: only the first line carries the prefix
        if self.preserve_newlines and "\n" in original_message:
            lines = original_message.split("\n")
            # Format the first line with the timestamp
            if self.session_id:
                first_line = f"{self.agent_name} | {timestamp} | {self.session_id} | {record.levelname} | {lines[0]}"
            else:
                first_line = f"{self.agent_name} | {timestamp} | {record.levelname} | {lines[0]}"

            # Return the first line and the rest as is
            return first_line + "\n" + "\n".join(lines[1:])
        # Regular single-line message
        if self.session_id:
            formatted_message = (
                f"{self.agent_name} | {timestamp} | {self.session_id} | {record.levelname} | {original_message}"
            )
        else:
            formatted_message = f"{self.agent_name} | {timestamp} | {record.levelname} | {original_message}"

        # Override the message in the record (see flow branch above for why)
        record.msg = formatted_message
        record.args = ()

        # Return the formatted message
        return formatted_message
205
+
206
+
207
class LoggerSetup:
    """
    Utility class for standardized logging setup across all agents.

    Loggers created here are queue-backed: each logger only holds a QueueHandler,
    while a per-logger QueueListener thread drains the queue into the real
    file/console handlers. Class-level registries track queues and listeners so
    they can be stopped cleanly via shutdown_listeners().
    """

    # Keep the old format for backward compatibility
    LEGACY_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    DEFAULT_LOG_LEVEL = "INFO"

    # Store active loggers for management
    _active_loggers: dict[str, logging.Logger] = {}
    _log_queues: dict[str, Queue] = {}
    _log_listeners: dict[str, QueueListener] = {}

    @classmethod
    def shutdown_listeners(cls) -> None:
        """Stop all queue listeners and strip handlers from every cached logger."""
        for listener in cls._log_listeners.values():
            with contextlib.suppress(Exception):
                listener.stop()
        cls._log_listeners.clear()
        # Also clear active loggers to avoid handler accumulation across test sessions
        for logger in cls._active_loggers.values():
            with contextlib.suppress(Exception):
                # Iterate a copy: removeHandler mutates logger.handlers.
                for handler in list(logger.handlers):
                    with contextlib.suppress(Exception):
                        handler.close()
                    logger.removeHandler(handler)
        cls._active_loggers.clear()

    @classmethod
    @beartype
    @ensure(lambda result: isinstance(result, logging.Logger), "Must return Logger instance")
    def create_agent_flow_logger(cls, session_id: str | None = None) -> logging.Logger:
        """
        Creates a dedicated logger for inter-agent message flow.
        This logger uses a queue for thread-safe and process-safe logging.
        In test mode, creates a null handler to prevent file creation.
        """
        logger_name = "agent_flow"
        if logger_name in cls._active_loggers:
            return cls._active_loggers[logger_name]

        # Check if we're in test mode (TEST_MODE=true environment variable)
        test_mode = os.environ.get("TEST_MODE", "").lower() == "true"

        log_queue = Queue(-1)
        cls._log_queues[logger_name] = log_queue

        formatter = MessageFlowFormatter(agent_name="inter_agent_comm", session_id=session_id)

        if test_mode:
            # In test mode, use a null handler that discards messages, but still use a QueueListener
            # so tests can assert on listener/QueueHandler presence without writing files.
            null_handler = logging.NullHandler()
            null_handler.setFormatter(formatter)
            null_handler.setLevel(logging.INFO)

            listener = QueueListener(log_queue, null_handler, respect_handler_level=True)
        else:
            # In production mode, use file handler
            runtime_logs_dir = get_runtime_logs_dir()
            log_file = os.path.join(runtime_logs_dir, "agent_flow.log")

            file_handler = RotatingFileHandler(log_file, maxBytes=10 * 1024 * 1024, backupCount=5)
            file_handler.setFormatter(formatter)
            file_handler.setLevel(logging.INFO)
            # Also stream to console so run_local.sh can colorize per agent
            console_handler = logging.StreamHandler(sys.stdout)
            console_handler.setFormatter(formatter)
            console_handler.setLevel(logging.INFO)

            listener = QueueListener(log_queue, file_handler, console_handler, respect_handler_level=True)

        listener.start()
        cls._log_listeners[logger_name] = listener

        logger = logging.getLogger(logger_name)
        logger.setLevel(logging.INFO)
        logger.propagate = False

        # Drop any handlers left over from a previous configuration
        if logger.handlers:
            for handler in logger.handlers:
                handler.close()
            logger.handlers.clear()

        queue_handler = QueueHandler(log_queue)
        logger.addHandler(queue_handler)

        # Add trace method to logger instance for convenience
        logger.trace = lambda message, *args, **kwargs: logger.log(5, message, *args, **kwargs)

        cls._active_loggers[logger_name] = logger

        return logger

    @classmethod
    @beartype
    @require(lambda name: isinstance(name, str) and len(name) > 0, "Name must be non-empty string")
    @require(
        lambda log_level: log_level is None or (isinstance(log_level, str) and len(log_level) > 0),
        "Log level must be None or non-empty string",
    )
    @ensure(lambda result: isinstance(result, logging.Logger), "Must return Logger instance")
    def create_logger(
        cls,
        name: str,
        log_file: str | None = None,
        agent_name: str | None = None,
        log_level: str | None = None,
        session_id: str | None = None,
        use_rotating_file: bool = True,
        append_mode: bool = True,
        preserve_test_format: bool = False,
    ) -> logging.Logger:
        """
        Creates a new logger or returns an existing one with the specified configuration.
        This method is process-safe and suitable for multi-agent environments.

        Args:
            name: Logger name (also the registry key).
            log_file: Optional log file path; relative paths land in the runtime logs dir.
            agent_name: Name stamped into each line (defaults to ``name``).
            log_level: Level name; falls back to $LOG_LEVEL, then DEFAULT_LOG_LEVEL.
            session_id: Optional session ID included in every formatted line.
            use_rotating_file: Use RotatingFileHandler instead of FileHandler.
            append_mode: Open the log file in append ("a") vs truncate ("w") mode.
            preserve_test_format: Disable newline preservation in the formatter.

        Returns:
            Configured logger instance.
        """
        logger_name = name
        if logger_name in cls._active_loggers:
            existing_logger = cls._active_loggers[logger_name]
            # If a file log was requested now but the existing logger was created without one,
            # rebuild the logger with file backing to ensure per-agent files are created.
            if log_file:
                # Stop and discard any existing listener
                existing_listener = cls._log_listeners.pop(logger_name, None)
                if existing_listener:
                    with contextlib.suppress(Exception):
                        existing_listener.stop()

                # Remove all handlers from the existing logger (copy: we mutate the list)
                with contextlib.suppress(Exception):
                    for handler in list(existing_logger.handlers):
                        with contextlib.suppress(Exception):
                            handler.close()
                        existing_logger.removeHandler(handler)

                # Remove from cache and proceed to full (re)creation below
                with contextlib.suppress(Exception):
                    cls._active_loggers.pop(logger_name, None)
            else:
                # No file requested: just ensure level is updated and reuse existing logger
                if log_level and existing_logger.level != logging.getLevelName(log_level.upper()):
                    existing_logger.setLevel(log_level.upper())
                return existing_logger

        # Determine log level (explicit arg beats $LOG_LEVEL beats class default)
        log_level_str = (log_level or os.environ.get("LOG_LEVEL", cls.DEFAULT_LOG_LEVEL)).upper()
        # Strip inline comments (e.g. "INFO  # default" from env files)
        log_level_clean = log_level_str.split("#")[0].strip()

        level = logging.getLevelName(log_level_clean)

        # Create logger
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        logger.propagate = False  # Prevent duplicate logs in parent loggers

        # Clear existing handlers to prevent duplication.
        # BUGFIX: iterate a copy of logger.handlers — removing while iterating
        # the live list skips every other handler.
        if logger.hasHandlers():
            for handler in list(logger.handlers):
                handler.close()
                logger.removeHandler(handler)

        # Prepare formatter
        log_format = MessageFlowFormatter(
            agent_name=agent_name or name,
            session_id=session_id,
            preserve_newlines=not preserve_test_format,
        )

        # Create a queue and listener for this logger if a file is specified
        if log_file:
            log_queue = Queue(-1)
            cls._log_queues[logger_name] = log_queue

            log_file_path = log_file
            if not os.path.isabs(log_file):
                logs_dir = get_runtime_logs_dir()
                log_file_path = os.path.join(logs_dir, log_file)

            # Ensure the directory for the log file exists
            log_file_dir = os.path.dirname(log_file_path)
            os.makedirs(log_file_dir, mode=0o777, exist_ok=True)
            # Proactively create/touch the file so it exists even before first write
            try:
                with open(log_file_path, "a", encoding="utf-8"):
                    pass
            except Exception:
                # Non-fatal; handler will attempt to open the file next
                pass

            try:
                if use_rotating_file:
                    handler: logging.Handler = RotatingFileHandler(
                        log_file_path,
                        maxBytes=10 * 1024 * 1024,
                        backupCount=5,
                        mode="a" if append_mode else "w",
                    )
                else:
                    handler = logging.FileHandler(log_file_path, mode="a" if append_mode else "w")
            except (FileNotFoundError, OSError):
                # Fallback for test environments where makedirs is mocked or paths are not writable
                fallback_dir = os.getcwd()
                fallback_path = os.path.join(fallback_dir, os.path.basename(log_file_path))
                if use_rotating_file:
                    handler = RotatingFileHandler(
                        fallback_path,
                        maxBytes=10 * 1024 * 1024,
                        backupCount=5,
                        mode="a" if append_mode else "w",
                    )
                else:
                    handler = logging.FileHandler(fallback_path, mode="a" if append_mode else "w")

            handler.setFormatter(log_format)
            handler.setLevel(level)

            listener = QueueListener(log_queue, handler, respect_handler_level=True)
            listener.start()
            cls._log_listeners[logger_name] = listener

            queue_handler = QueueHandler(log_queue)
            logger.addHandler(queue_handler)

            # Emit a one-time initialization line so users can see where logs go
            with contextlib.suppress(Exception):
                logger.info("[LoggerSetup] File logger initialized: %s", log_file_path)
        else:
            # If no log file is specified, set up a listener with a console handler
            log_queue = Queue(-1)
            cls._log_queues[logger_name] = log_queue

            console_handler = logging.StreamHandler(sys.stdout)
            console_handler.setFormatter(log_format)
            console_handler.setLevel(level)

            listener = QueueListener(log_queue, console_handler, respect_handler_level=True)
            listener.start()
            cls._log_listeners[logger_name] = listener

            queue_handler = QueueHandler(log_queue)
            logger.addHandler(queue_handler)

        # Add a console handler for non-test environments or when no file is specified
        if "pytest" not in sys.modules and not any(isinstance(h, logging.StreamHandler) for h in logger.handlers):
            console_handler = logging.StreamHandler(sys.stdout)
            console_handler.setFormatter(log_format)
            console_handler.setLevel(level)
            logger.addHandler(console_handler)

        # Add trace method to logger instance for convenience
        logger.trace = lambda message, *args, **kwargs: logger.log(5, message, *args, **kwargs)

        cls._active_loggers[logger_name] = logger
        return logger

    @classmethod
    def flush_all_loggers(cls) -> None:
        """
        Flush all active loggers to ensure their output is written.

        With QueueListener-backed loggers, flushing the QueueHandler does not
        guarantee the message is written — the listener thread handles delivery,
        and only stopping the listener drains the queue (a shutdown concern).
        This method is therefore intentionally a no-op for queued logs.
        """

    @classmethod
    @beartype
    @require(lambda name: isinstance(name, str) and len(name) > 0, "Name must be non-empty string")
    @ensure(lambda result: isinstance(result, bool), "Must return boolean")
    def flush_logger(cls, name: str) -> bool:
        """
        Flush a specific logger by name

        Args:
            name: Name of the logger to flush

        Returns:
            True if logger was found and flushed, False otherwise
        """
        # See flush_all_loggers comment. This is now a no-op.
        return name in cls._active_loggers

    @classmethod
    @beartype
    @require(lambda logger: isinstance(logger, logging.Logger), "Logger must be Logger instance")
    @require(lambda summary: isinstance(summary, dict), "Summary must be dictionary")
    def write_test_summary(cls, logger: logging.Logger, summary: dict[str, Any]) -> None:
        """
        Write test summary in a format that log_analyzer.py can understand

        Args:
            logger: The logger to use
            summary: Dictionary with test summary information
        """
        listener = cls._log_listeners.get(logger.name)
        if not listener:
            # Fallback for non-queued loggers, though all should be queued now
            for handler in logger.handlers:
                handler.flush()
            logger.info("=" * 15 + " test session starts " + "=" * 15)
            # ... rest of the original implementation
            return

        # Find the file handler to get its path
        file_handler = next(
            (h for h in listener.handlers if isinstance(h, (logging.FileHandler, RotatingFileHandler))),
            None,
        )
        if not file_handler:
            return

        # Stop the listener to ensure the queue is flushed before we write the summary
        listener.stop()

        # Write summary directly to the file to ensure it's synchronous
        log_file_path = file_handler.baseFilename
        passed = summary.get("passed", 0)
        failed = summary.get("failed", 0)
        skipped = summary.get("skipped", 0)
        duration = summary.get("duration", 0)
        summary_lines = [
            "=" * 15 + " test session starts " + "=" * 15,
            f"{passed} passed, {failed} failed, {skipped} skipped in {duration:.2f}s",
            f"Test Summary: {passed} passed, {failed} failed, {skipped} skipped",
            f"Status: {'PASSED' if failed == 0 else 'FAILED'}",
            f"Duration: {duration:.2f} seconds",
        ]

        if summary.get("failed_tests"):
            summary_lines.append("Failed tests by module:")
            for module, tests in summary.get("failed_modules", {}).items():
                summary_lines.append(f"Module: {module} - {len(tests)} failed tests")
                for test in tests:
                    summary_lines.append(f"- {test}")

        summary_lines.append("=" * 50)

        with open(log_file_path, "a", encoding="utf-8") as f:
            f.write("\n".join(summary_lines) + "\n")

        # Restart the listener for any subsequent logging
        listener.start()

    @classmethod
    @beartype
    @require(lambda name: isinstance(name, str) and len(name) > 0, "Name must be non-empty string")
    @ensure(lambda result: result is None or isinstance(result, logging.Logger), "Must return None or Logger instance")
    def get_logger(cls, name: str) -> logging.Logger | None:
        """
        Get a logger by name

        Args:
            name: Name of the logger

        Returns:
            Configured logger instance or None if logger doesn't exist
        """
        return cls._active_loggers.get(name)

    @staticmethod
    @beartype
    @require(lambda logger: isinstance(logger, logging.Logger), "Logger must be Logger instance")
    @require(lambda message: isinstance(message, str), "Message must be string")
    def trace(logger: logging.Logger, message: str, *args: Any, **kwargs: Any) -> None:
        """
        Log a message at TRACE level (5)

        Args:
            logger: Logger instance
            message: Log message
            *args: Additional arguments for string formatting
            **kwargs: Additional keyword arguments for logging
        """
        logger.log(5, message, *args, **kwargs)

    @staticmethod
    @beartype
    @ensure(lambda result: result is not None, "Must return object")
    def redact_secrets(obj: Any) -> Any:
        """
        Recursively mask sensitive values (API keys, tokens, passwords, secrets) in dicts/lists/strings.
        Returns a sanitized copy of the object suitable for logging.

        NOTE(review): the ``result is not None`` postcondition trips when a
        non-sensitive leaf value is None (the recursion returns None) — confirm
        whether the contract or the recursion should be relaxed.
        """
        sensitive_keys = ["key", "token", "password", "secret"]
        if isinstance(obj, dict):
            redacted = {}
            for k, v in obj.items():
                if any(s in k.lower() for s in sensitive_keys):
                    if isinstance(v, str) and len(v) > 4:
                        redacted[k] = f"*** MASKED (ends with '{v[-4:]}') ***"
                    elif v:
                        redacted[k] = "*** MASKED ***"
                    else:
                        redacted[k] = None
                else:
                    redacted[k] = LoggerSetup.redact_secrets(v)
            return redacted
        if isinstance(obj, list):
            return [LoggerSetup.redact_secrets(item) for item in obj]
        if isinstance(obj, str):
            # Optionally, mask API key patterns in strings (e.g., sk-...)
            # Example: OpenAI key pattern
            return re.sub(r"sk-[a-zA-Z0-9_-]{20,}", "*** MASKED API KEY ***", obj)
        return obj
617
+
618
+
619
@beartype
@require(lambda agent_name: isinstance(agent_name, str) and len(agent_name) > 0, "Agent name must be non-empty string")
@require(lambda log_level: isinstance(log_level, str) and len(log_level) > 0, "Log level must be non-empty string")
@ensure(lambda result: isinstance(result, logging.Logger), "Must return Logger instance")
def setup_logger(
    agent_name: str,
    log_level: str = "INFO",
    session_id: str | None = None,
    log_file: str | None = None,
    use_rotating_file: bool = True,
) -> logging.Logger:
    """
    Build (or fetch) a logger for the given agent via the shared LoggerSetup factory.

    Thin module-level convenience wrapper: the agent name doubles as both the
    logger name and the per-line agent label.

    Args:
        agent_name: Name of the agent
        log_level: Log level (default: INFO)
        session_id: Optional unique session ID to include in all log messages
        log_file: Optional file path for logging
        use_rotating_file: Whether to use rotating file handler (default: True)

    Returns:
        Configured logger
    """
    # Delegate to the central factory so every module shares one configuration path.
    return LoggerSetup.create_logger(
        agent_name,
        log_file=log_file,
        agent_name=agent_name,
        log_level=log_level,
        session_id=session_id,
        use_rotating_file=use_rotating_file,
    )
652
+
653
+
654
+ atexit.register(LoggerSetup.shutdown_listeners)
@@ -0,0 +1,41 @@
1
+ """Logging helpers with graceful fallback when SpecFact CLI common module is unavailable."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import logging
6
+
7
+ from beartype import beartype
8
+ from icontract import ensure, require
9
+
10
+
11
@beartype
@require(lambda name: isinstance(name, str) and len(name) > 0, "Name must be non-empty string")
@require(lambda level: isinstance(level, str) and len(level) > 0, "Level must be non-empty string")
@ensure(lambda result: isinstance(result, logging.Logger), "Must return Logger instance")
def get_bridge_logger(name: str, level: str = "INFO") -> logging.Logger:
    """
    Retrieve a configured logger.

    If the SpecFact CLI `common.logger_setup` module is available we reuse it, otherwise
    we create a standard library logger to keep the bridge self-contained.
    """
    # Prefer the full LoggerSetup factory when the CLI package is importable.
    logger = _try_common_logger(name, level)
    if logger is not None:
        return logger

    # Fallback: plain stdlib logger, configured only once per name so repeated
    # calls don't stack duplicate handlers on the same logger.
    fallback_logger = logging.getLogger(name)
    if not fallback_logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter(fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s")
        handler.setFormatter(formatter)
        fallback_logger.addHandler(handler)
        # NOTE(review): level appears to be applied only on first configuration
        # (source indentation was ambiguous) — confirm later calls with a
        # different level are intentionally ignored.
        fallback_logger.setLevel(level.upper())
    return fallback_logger
34
+
35
+
36
+ def _try_common_logger(name: str, level: str) -> logging.Logger | None:
37
+ try:
38
+ from specfact_cli.common.logger_setup import LoggerSetup # type: ignore[import]
39
+ except ImportError:
40
+ return None
41
+ return LoggerSetup.create_logger(name, log_level=level)