tweek-0.1.0-py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. tweek/__init__.py +16 -0
  2. tweek/cli.py +3390 -0
  3. tweek/cli_helpers.py +193 -0
  4. tweek/config/__init__.py +13 -0
  5. tweek/config/allowed_dirs.yaml +23 -0
  6. tweek/config/manager.py +1064 -0
  7. tweek/config/patterns.yaml +751 -0
  8. tweek/config/tiers.yaml +129 -0
  9. tweek/diagnostics.py +589 -0
  10. tweek/hooks/__init__.py +1 -0
  11. tweek/hooks/pre_tool_use.py +861 -0
  12. tweek/integrations/__init__.py +3 -0
  13. tweek/integrations/moltbot.py +243 -0
  14. tweek/licensing.py +398 -0
  15. tweek/logging/__init__.py +9 -0
  16. tweek/logging/bundle.py +350 -0
  17. tweek/logging/json_logger.py +150 -0
  18. tweek/logging/security_log.py +745 -0
  19. tweek/mcp/__init__.py +24 -0
  20. tweek/mcp/approval.py +456 -0
  21. tweek/mcp/approval_cli.py +356 -0
  22. tweek/mcp/clients/__init__.py +37 -0
  23. tweek/mcp/clients/chatgpt.py +112 -0
  24. tweek/mcp/clients/claude_desktop.py +203 -0
  25. tweek/mcp/clients/gemini.py +178 -0
  26. tweek/mcp/proxy.py +667 -0
  27. tweek/mcp/screening.py +175 -0
  28. tweek/mcp/server.py +317 -0
  29. tweek/platform/__init__.py +131 -0
  30. tweek/plugins/__init__.py +835 -0
  31. tweek/plugins/base.py +1080 -0
  32. tweek/plugins/compliance/__init__.py +30 -0
  33. tweek/plugins/compliance/gdpr.py +333 -0
  34. tweek/plugins/compliance/gov.py +324 -0
  35. tweek/plugins/compliance/hipaa.py +285 -0
  36. tweek/plugins/compliance/legal.py +322 -0
  37. tweek/plugins/compliance/pci.py +361 -0
  38. tweek/plugins/compliance/soc2.py +275 -0
  39. tweek/plugins/detectors/__init__.py +30 -0
  40. tweek/plugins/detectors/continue_dev.py +206 -0
  41. tweek/plugins/detectors/copilot.py +254 -0
  42. tweek/plugins/detectors/cursor.py +192 -0
  43. tweek/plugins/detectors/moltbot.py +205 -0
  44. tweek/plugins/detectors/windsurf.py +214 -0
  45. tweek/plugins/git_discovery.py +395 -0
  46. tweek/plugins/git_installer.py +491 -0
  47. tweek/plugins/git_lockfile.py +338 -0
  48. tweek/plugins/git_registry.py +503 -0
  49. tweek/plugins/git_security.py +482 -0
  50. tweek/plugins/providers/__init__.py +30 -0
  51. tweek/plugins/providers/anthropic.py +181 -0
  52. tweek/plugins/providers/azure_openai.py +289 -0
  53. tweek/plugins/providers/bedrock.py +248 -0
  54. tweek/plugins/providers/google.py +197 -0
  55. tweek/plugins/providers/openai.py +230 -0
  56. tweek/plugins/scope.py +130 -0
  57. tweek/plugins/screening/__init__.py +26 -0
  58. tweek/plugins/screening/llm_reviewer.py +149 -0
  59. tweek/plugins/screening/pattern_matcher.py +273 -0
  60. tweek/plugins/screening/rate_limiter.py +174 -0
  61. tweek/plugins/screening/session_analyzer.py +159 -0
  62. tweek/proxy/__init__.py +302 -0
  63. tweek/proxy/addon.py +223 -0
  64. tweek/proxy/interceptor.py +313 -0
  65. tweek/proxy/server.py +315 -0
  66. tweek/sandbox/__init__.py +71 -0
  67. tweek/sandbox/executor.py +382 -0
  68. tweek/sandbox/linux.py +278 -0
  69. tweek/sandbox/profile_generator.py +323 -0
  70. tweek/screening/__init__.py +13 -0
  71. tweek/screening/context.py +81 -0
  72. tweek/security/__init__.py +22 -0
  73. tweek/security/llm_reviewer.py +348 -0
  74. tweek/security/rate_limiter.py +682 -0
  75. tweek/security/secret_scanner.py +506 -0
  76. tweek/security/session_analyzer.py +600 -0
  77. tweek/vault/__init__.py +40 -0
  78. tweek/vault/cross_platform.py +251 -0
  79. tweek/vault/keychain.py +288 -0
  80. tweek-0.1.0.dist-info/METADATA +335 -0
  81. tweek-0.1.0.dist-info/RECORD +85 -0
  82. tweek-0.1.0.dist-info/WHEEL +5 -0
  83. tweek-0.1.0.dist-info/entry_points.txt +25 -0
  84. tweek-0.1.0.dist-info/licenses/LICENSE +190 -0
  85. tweek-0.1.0.dist-info/top_level.txt +1 -0
tweek/logging/security_log.py
@@ -0,0 +1,745 @@
+ #!/usr/bin/env python3
+ """
+ Tweek Security Logger
+
+ SQLite-based audit logging for security events.
+ Logs all tool/skill invocations, screening decisions, and user responses.
+
+ Database location: ~/.tweek/security.db
+
+ Includes log redaction for sensitive data based on moltbot's security hardening.
+ """
+
+ import json
+ import os
+ import re
+ import sqlite3
+ from contextlib import contextmanager
+ from dataclasses import dataclass, asdict
+ from datetime import datetime
+ from enum import Enum
+ from pathlib import Path
+ from typing import Optional, List, Dict, Any, Pattern
+
+
+ class LogRedactor:
+     """
+     Redacts sensitive information from log data.
+
+     Based on moltbot's log-redaction security feature.
+     Ensures secrets, tokens, and credentials are never written to logs.
+     """
+
+     # Patterns for sensitive data that should be redacted
+     REDACTION_PATTERNS: List[tuple[str, Pattern, str]] = [
+         # API Keys - various formats
+         ("api_key", re.compile(
+             r'(?i)(api[_-]?key|apikey|secret[_-]?key)[\s:=]+[\'\"]?([A-Za-z0-9_-]{16,})[\'\"]?'
+         ), r'\1=***REDACTED***'),
+
+         # AWS Access Keys
+         ("aws_key", re.compile(
+             r'((?:A3T[A-Z0-9]|AKIA|ABIA|ACCA|ASIA)[A-Z0-9]{16})'
+         ), '***AWS_KEY_REDACTED***'),
+
+         # AWS Secret Keys (40 char)
+         ("aws_secret", re.compile(
+             r'(?i)(aws[_-]?secret[_-]?access[_-]?key|aws[_-]?secret)[\s:=]+[\'\"]?([A-Za-z0-9/+=]{40})[\'\"]?'
+         ), r'\1=***REDACTED***'),
+
+         # Bearer tokens
+         ("bearer", re.compile(
+             r'(?i)(bearer)\s+([A-Za-z0-9_-]{20,})'
+         ), r'\1 ***REDACTED***'),
+
+         # JWT tokens
+         ("jwt", re.compile(
+             r'(eyJ[A-Za-z0-9_-]*\.eyJ[A-Za-z0-9_-]*\.[A-Za-z0-9_-]*)'
+         ), '***JWT_REDACTED***'),
+
+         # GitHub tokens
+         ("github", re.compile(
+             r'(gh[pousr]_[A-Za-z0-9_]{36,})'
+         ), '***GITHUB_TOKEN_REDACTED***'),
+
+         # Slack tokens
+         ("slack", re.compile(
+             r'(xox[baprs]-[0-9]{10,13}-[0-9]{10,13}[a-zA-Z0-9-]*)'
+         ), '***SLACK_TOKEN_REDACTED***'),
+
+         # Generic passwords in assignments
+         ("password", re.compile(
+             r'(?i)(password|passwd|pwd|secret)[\s:=]+[\'\"]?([^\s\'\"\n]{8,})[\'\"]?'
+         ), r'\1=***REDACTED***'),
+
+         # Connection strings with credentials
+         ("connection_string", re.compile(
+             r'(?i)(mongodb|postgres|mysql|redis|amqp)://([^:]+):([^@]+)@'
+         ), r'\1://\2:***REDACTED***@'),
+
+         # Private key headers
+         ("private_key", re.compile(
+             r'(-----BEGIN (?:RSA |DSA |EC |OPENSSH )?PRIVATE KEY-----[\s\S]*?-----END (?:RSA |DSA |EC |OPENSSH )?PRIVATE KEY-----)'
+         ), '***PRIVATE_KEY_REDACTED***'),
+
+         # Base64 encoded secrets (long base64 strings in sensitive contexts)
+         ("base64_secret", re.compile(
+             r'(?i)(secret|key|token|credential)[\s:=]+[\'\"]?([A-Za-z0-9+/]{40,}={0,2})[\'\"]?'
+         ), r'\1=***REDACTED***'),
+
+         # Email addresses (for privacy)
+         ("email", re.compile(
+             r'([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,})'
+         ), '***EMAIL_REDACTED***'),
+
+         # Credit card numbers
+         ("credit_card", re.compile(
+             r'\b([0-9]{4}[- ]?[0-9]{4}[- ]?[0-9]{4}[- ]?[0-9]{4})\b'
+         ), '***CARD_REDACTED***'),
+
+         # SSH private key paths being read
+         ("ssh_key_read", re.compile(
+             r'(?i)(cat|less|more|head|tail|read)\s+["\']?(~?/[^\s]*\.pem|~?/\.ssh/[^\s]*)["\']?'
+         ), r'\1 ***SSH_PATH_REDACTED***'),
+     ]
+
+     # Keys in dictionaries that should have their values redacted
+     SENSITIVE_KEYS = {
+         'password', 'passwd', 'pwd', 'secret', 'token', 'api_key', 'apikey',
+         'access_key', 'secret_key', 'private_key', 'credential', 'auth',
+         'bearer', 'jwt', 'session', 'cookie', 'oauth', 'refresh_token',
+         'client_secret', 'app_secret', 'webhook_secret', 'signing_key',
+         'encryption_key', 'decryption_key', 'master_key', 'root_password',
+     }
+
+     def __init__(self, enabled: bool = True):
+         """
+         Initialize the log redactor.
+
+         Args:
+             enabled: Whether redaction is enabled (default True)
+         """
+         self.enabled = enabled
+
+     def redact_string(self, text: str) -> str:
+         """
+         Redact sensitive information from a string.
+
+         Args:
+             text: The text to redact
+
+         Returns:
+             Redacted text with sensitive data replaced
+         """
+         if not self.enabled or not text:
+             return text
+
+         result = text
+         for name, pattern, replacement in self.REDACTION_PATTERNS:
+             result = pattern.sub(replacement, result)
+
+         return result
+
+     def redact_dict(self, data: Dict[str, Any], depth: int = 0) -> Dict[str, Any]:
+         """
+         Redact sensitive information from a dictionary.
+
+         Args:
+             data: Dictionary to redact
+             depth: Current recursion depth (to prevent infinite loops)
+
+         Returns:
+             Dictionary with sensitive values redacted
+         """
+         if not self.enabled or not data or depth > 10:
+             return data
+
+         result = {}
+         for key, value in data.items():
+             key_lower = key.lower()
+
+             # Check if key is sensitive
+             is_sensitive = any(
+                 sensitive in key_lower
+                 for sensitive in self.SENSITIVE_KEYS
+             )
+
+             if is_sensitive and isinstance(value, str):
+                 # Redact the entire value
+                 result[key] = "***REDACTED***"
+             elif isinstance(value, str):
+                 # Apply pattern-based redaction
+                 result[key] = self.redact_string(value)
+             elif isinstance(value, dict):
+                 # Recursively redact nested dicts
+                 result[key] = self.redact_dict(value, depth + 1)
+             elif isinstance(value, list):
+                 # Redact items in lists
+                 result[key] = [
+                     self.redact_dict(item, depth + 1) if isinstance(item, dict)
+                     else self.redact_string(item) if isinstance(item, str)
+                     else item
+                     for item in value
+                 ]
+             else:
+                 result[key] = value
+
+         return result
+
+     def redact_command(self, command: str) -> str:
+         """
+         Redact sensitive information from a command string.
+
+         Args:
+             command: Command to redact
+
+         Returns:
+             Redacted command
+         """
+         if not self.enabled or not command:
+             return command
+
+         # Apply general string redaction
+         result = self.redact_string(command)
+
+         # Additional command-specific patterns
+         command_patterns = [
+             # curl with auth headers - capture everything after Authorization:
+             (re.compile(r'(-H\s+["\']?Authorization:\s*(?:Bearer\s+)?)[^"\']+(["\'])'), r'\1***REDACTED***\2'),
+             # curl with data containing secrets
+             (re.compile(r'(-d\s+["\'][^"\']*(?:password|secret|token)[^"\']*=)[^&"\'\s]+'), r'\1***REDACTED***'),
+             # Environment variable exports with secrets
+             (re.compile(r'(?i)(export\s+(?:\w*(?:KEY|SECRET|TOKEN|PASSWORD)\w*)\s*=\s*)[^\s;]+'), r'\1***REDACTED***'),
+             # inline environment variables
+             (re.compile(r'(?i)(\w*(?:KEY|SECRET|TOKEN|PASSWORD)\w*=)[^\s;]+'), r'\1***REDACTED***'),
+         ]
+
+         for pattern, replacement in command_patterns:
+             result = pattern.sub(replacement, result)
+
+         return result
+
+
+ # Singleton redactor instance
+ _redactor: Optional[LogRedactor] = None
+
+
+ def get_redactor(enabled: bool = True) -> LogRedactor:
+     """Get the singleton log redactor instance."""
+     global _redactor
+     if _redactor is None:
+         _redactor = LogRedactor(enabled=enabled)
+     return _redactor
+
+
+ class EventType(Enum):
+     """Types of security events."""
+     # Core screening events
+     TOOL_INVOKED = "tool_invoked"  # Tool call received
+     PATTERN_MATCH = "pattern_match"  # Regex pattern matched
+     LLM_RULE_MATCH = "llm_rule_match"  # LLM rule flagged
+     ESCALATION = "escalation"  # Tier escalated due to content
+     ALLOWED = "allowed"  # Execution permitted
+     BLOCKED = "blocked"  # Execution blocked
+     USER_PROMPTED = "user_prompted"  # User asked for confirmation
+     USER_APPROVED = "user_approved"  # User approved after prompt
+     USER_DENIED = "user_denied"  # User denied after prompt
+     SANDBOX_PREVIEW = "sandbox_preview"  # Sandbox preview executed
+     ERROR = "error"  # Error during processing
+
+     # Vault events
+     VAULT_ACCESS = "vault_access"  # Credential store/get/delete
+     VAULT_MIGRATION = "vault_migration"  # .env migration to vault
+
+     # Configuration events
+     CONFIG_CHANGE = "config_change"  # Tier/preset/config modification
+
+     # License events
+     LICENSE_EVENT = "license_event"  # Activation, deactivation, validation
+
+     # Advanced screening events
+     RATE_LIMIT = "rate_limit"  # Rate limit violation
+     SESSION_ANOMALY = "session_anomaly"  # Session analysis anomaly detected
+     CIRCUIT_BREAKER = "circuit_breaker"  # Circuit breaker state transition
+
+     # Plugin events
+     PLUGIN_EVENT = "plugin_event"  # Plugin load, failure, scan result
+
+     # MCP events
+     MCP_APPROVAL = "mcp_approval"  # MCP approval queue decision
+
+     # Proxy events
+     PROXY_EVENT = "proxy_event"  # HTTP proxy request screening
+
+     # System events
+     HEALTH_CHECK = "health_check"  # Diagnostic check results
+     STARTUP = "startup"  # System initialization
+
+
+ @dataclass
+ class SecurityEvent:
+     """A security event to be logged."""
+     event_type: EventType
+     tool_name: str
+     command: Optional[str] = None
+     tier: Optional[str] = None
+     pattern_name: Optional[str] = None
+     pattern_severity: Optional[str] = None
+     decision: Optional[str] = None  # allow, block, ask
+     decision_reason: Optional[str] = None
+     user_response: Optional[str] = None  # approved, denied
+     metadata: Optional[Dict[str, Any]] = None
+     session_id: Optional[str] = None
+     working_directory: Optional[str] = None
+     correlation_id: Optional[str] = None  # Links related events in a screening pass
+     source: Optional[str] = None  # "hooks", "mcp", "mcp_proxy", "http_proxy"
+
+
+ class SecurityLogger:
+     """SQLite-based security event logger with automatic redaction."""
+
+     DEFAULT_DB_PATH = Path.home() / ".tweek" / "security.db"
+
+     def __init__(
+         self,
+         db_path: Optional[Path] = None,
+         redact_logs: bool = True
+     ):
+         """Initialize the security logger.
+
+         Args:
+             db_path: Path to SQLite database. Defaults to ~/.tweek/security.db
+             redact_logs: Whether to redact sensitive data before logging (default True)
+         """
+         self.db_path = db_path or self.DEFAULT_DB_PATH
+         # Create own redactor instance instead of using singleton
+         self.redactor = LogRedactor(enabled=redact_logs)
+         self._ensure_db_exists()
+
+     def _ensure_db_exists(self):
+         """Create database and tables if they don't exist."""
+         self.db_path.parent.mkdir(parents=True, exist_ok=True)
+
+         with self._get_connection() as conn:
+             # Create table first (without views that reference new columns)
+             conn.executescript("""
+                 -- Main events table
+                 CREATE TABLE IF NOT EXISTS security_events (
+                     id INTEGER PRIMARY KEY AUTOINCREMENT,
+                     timestamp TEXT NOT NULL DEFAULT (datetime('now')),
+                     event_type TEXT NOT NULL,
+                     tool_name TEXT NOT NULL,
+                     command TEXT,
+                     tier TEXT,
+                     pattern_name TEXT,
+                     pattern_severity TEXT,
+                     decision TEXT,
+                     decision_reason TEXT,
+                     user_response TEXT,
+                     session_id TEXT,
+                     working_directory TEXT,
+                     metadata_json TEXT,
+                     correlation_id TEXT,
+                     source TEXT
+                 );
+
+                 -- Index for common queries
+                 CREATE INDEX IF NOT EXISTS idx_events_timestamp
+                     ON security_events(timestamp);
+                 CREATE INDEX IF NOT EXISTS idx_events_type
+                     ON security_events(event_type);
+                 CREATE INDEX IF NOT EXISTS idx_events_tool
+                     ON security_events(tool_name);
+                 CREATE INDEX IF NOT EXISTS idx_events_decision
+                     ON security_events(decision);
+                 CREATE INDEX IF NOT EXISTS idx_events_session
+                     ON security_events(session_id);
+             """)
+
+             # Migrate existing databases that lack new columns
+             # (must happen BEFORE creating views that reference new columns)
+             self._migrate_schema(conn)
+
+             # Now create indexes and views that reference new columns
+             conn.executescript("""
+                 CREATE INDEX IF NOT EXISTS idx_events_correlation
+                     ON security_events(correlation_id);
+                 CREATE INDEX IF NOT EXISTS idx_events_source
+                     ON security_events(source);
+
+                 -- Summary statistics view (recreate to include new columns)
+                 DROP VIEW IF EXISTS event_summary;
+                 CREATE VIEW event_summary AS
+                 SELECT
+                     date(timestamp) as date,
+                     event_type,
+                     tool_name,
+                     decision,
+                     source,
+                     COUNT(*) as count
+                 FROM security_events
+                 GROUP BY date(timestamp), event_type, tool_name, decision, source;
+
+                 -- Recent blocks view (recreate to include new columns)
+                 DROP VIEW IF EXISTS recent_blocks;
+                 CREATE VIEW recent_blocks AS
+                 SELECT
+                     timestamp,
+                     tool_name,
+                     command,
+                     pattern_name,
+                     pattern_severity,
+                     decision_reason,
+                     correlation_id,
+                     source
+                 FROM security_events
+                 WHERE decision IN ('block', 'ask')
+                 ORDER BY timestamp DESC
+                 LIMIT 100;
+             """)
+
+     def _migrate_schema(self, conn):
+         """Add new columns to existing databases if missing."""
+         existing_columns = {
+             row[1] for row in conn.execute("PRAGMA table_info(security_events)").fetchall()
+         }
+         migrations = [
+             ("correlation_id", "TEXT"),
+             ("source", "TEXT"),
+         ]
+         for col_name, col_type in migrations:
+             if col_name not in existing_columns:
+                 conn.execute(f"ALTER TABLE security_events ADD COLUMN {col_name} {col_type}")
+
+         # Ensure new indexes exist (safe to re-run)
+         conn.execute(
+             "CREATE INDEX IF NOT EXISTS idx_events_correlation ON security_events(correlation_id)"
+         )
+         conn.execute(
+             "CREATE INDEX IF NOT EXISTS idx_events_source ON security_events(source)"
+         )
+
+     @contextmanager
+     def _get_connection(self):
+         """Get a database connection with proper cleanup."""
+         conn = sqlite3.connect(str(self.db_path))
+         conn.row_factory = sqlite3.Row
+         try:
+             yield conn
+             conn.commit()
+         finally:
+             conn.close()
+
+     def log(self, event: SecurityEvent) -> int:
+         """Log a security event with automatic redaction of sensitive data.
+
+         Args:
+             event: The security event to log
+
+         Returns:
+             The row ID of the inserted event
+         """
+         # Redact sensitive data before logging
+         redacted_command = self.redactor.redact_command(event.command) if event.command else None
+         redacted_reason = self.redactor.redact_string(event.decision_reason) if event.decision_reason else None
+         redacted_metadata = self.redactor.redact_dict(event.metadata) if event.metadata else None
+
+         with self._get_connection() as conn:
+             cursor = conn.execute("""
+                 INSERT INTO security_events (
+                     event_type, tool_name, command, tier,
+                     pattern_name, pattern_severity,
+                     decision, decision_reason, user_response,
+                     session_id, working_directory, metadata_json,
+                     correlation_id, source
+                 ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+             """, (
+                 event.event_type.value,
+                 event.tool_name,
+                 redacted_command,
+                 event.tier,
+                 event.pattern_name,
+                 event.pattern_severity,
+                 event.decision,
+                 redacted_reason,
+                 event.user_response,
+                 event.session_id,
+                 event.working_directory,
+                 json.dumps(redacted_metadata) if redacted_metadata else None,
+                 event.correlation_id,
+                 event.source,
+             ))
+             row_id = cursor.lastrowid
+
+         # Also write to JSON logger if available
+         self._write_json_event(event, redacted_command, redacted_reason, redacted_metadata)
+
+         return row_id
+
+     def _write_json_event(
+         self,
+         event: SecurityEvent,
+         redacted_command: Optional[str],
+         redacted_reason: Optional[str],
+         redacted_metadata: Optional[Dict[str, Any]],
+     ):
+         """Write event to NDJSON log file if JSON logging is enabled."""
+         try:
+             from tweek.logging.json_logger import get_json_logger
+             json_logger = get_json_logger()
+             if json_logger and json_logger.enabled:
+                 json_logger.write_event(
+                     event, redacted_command, redacted_reason, redacted_metadata
+                 )
+         except Exception:
+             pass  # JSON logging should never break primary logging
+
+     def log_quick(
+         self,
+         event_type: EventType,
+         tool_name: str,
+         command: Optional[str] = None,
+         **kwargs
+     ) -> int:
+         """Quick logging helper.
+
+         Args:
+             event_type: Type of event
+             tool_name: Name of the tool
+             command: The command being executed
+             **kwargs: Additional event fields
+
+         Returns:
+             The row ID of the inserted event
+         """
+         event = SecurityEvent(
+             event_type=event_type,
+             tool_name=tool_name,
+             command=command,
+             **kwargs
+         )
+         return self.log(event)
+
+     def get_recent_events(
+         self,
+         limit: int = 50,
+         event_type: Optional[EventType] = None,
+         tool_name: Optional[str] = None
+     ) -> List[Dict]:
+         """Get recent security events.
+
+         Args:
+             limit: Maximum number of events to return
+             event_type: Filter by event type
+             tool_name: Filter by tool name
+
+         Returns:
+             List of event dictionaries
+         """
+         query = "SELECT * FROM security_events WHERE 1=1"
+         params = []
+
+         if event_type:
+             query += " AND event_type = ?"
+             params.append(event_type.value)
+
+         if tool_name:
+             query += " AND tool_name = ?"
+             params.append(tool_name)
+
+         query += " ORDER BY timestamp DESC LIMIT ?"
+         params.append(limit)
+
+         with self._get_connection() as conn:
+             rows = conn.execute(query, params).fetchall()
+             return [dict(row) for row in rows]
+
+     def get_stats(self, days: int = 7) -> Dict[str, Any]:
+         """Get security statistics for the specified period.
+
+         Args:
+             days: Number of days to include
+
+         Returns:
+             Dictionary with statistics
+         """
+         with self._get_connection() as conn:
+             # Total events
+             total = conn.execute("""
+                 SELECT COUNT(*) as count FROM security_events
+                 WHERE timestamp > datetime('now', ?)
+             """, (f'-{days} days',)).fetchone()['count']
+
+             # Events by decision
+             decisions = conn.execute("""
+                 SELECT decision, COUNT(*) as count
+                 FROM security_events
+                 WHERE timestamp > datetime('now', ?)
+                 AND decision IS NOT NULL
+                 GROUP BY decision
+             """, (f'-{days} days',)).fetchall()
+
+             # Top triggered patterns
+             patterns = conn.execute("""
+                 SELECT pattern_name, pattern_severity, COUNT(*) as count
+                 FROM security_events
+                 WHERE timestamp > datetime('now', ?)
+                 AND pattern_name IS NOT NULL
+                 GROUP BY pattern_name, pattern_severity
+                 ORDER BY count DESC
+                 LIMIT 10
+             """, (f'-{days} days',)).fetchall()
+
+             # Events by tool
+             by_tool = conn.execute("""
+                 SELECT tool_name, COUNT(*) as count
+                 FROM security_events
+                 WHERE timestamp > datetime('now', ?)
+                 GROUP BY tool_name
+                 ORDER BY count DESC
+             """, (f'-{days} days',)).fetchall()
+
+             return {
+                 'period_days': days,
+                 'total_events': total,
+                 'by_decision': {row['decision']: row['count'] for row in decisions},
+                 'top_patterns': [
+                     {
+                         'name': row['pattern_name'],
+                         'severity': row['pattern_severity'],
+                         'count': row['count']
+                     }
+                     for row in patterns
+                 ],
+                 'by_tool': {row['tool_name']: row['count'] for row in by_tool}
+             }
+
+     def get_blocked_commands(self, limit: int = 20) -> List[Dict]:
+         """Get recently blocked or flagged commands.
+
+         Args:
+             limit: Maximum number to return
+
+         Returns:
+             List of blocked command details
+         """
+         with self._get_connection() as conn:
+             rows = conn.execute("""
+                 SELECT * FROM recent_blocks LIMIT ?
+             """, (limit,)).fetchall()
+             return [dict(row) for row in rows]
+
+     def get_recent(self, limit: int = 10) -> List[SecurityEvent]:
+         """Get recent events as SecurityEvent objects.
+
+         Args:
+             limit: Maximum number of events
+
+         Returns:
+             List of SecurityEvent objects (most recent first)
+         """
+         with self._get_connection() as conn:
+             rows = conn.execute(
+                 "SELECT * FROM security_events ORDER BY timestamp DESC LIMIT ?",
+                 (limit,),
+             ).fetchall()
+
+         events = []
+         for row in rows:
+             row_dict = dict(row)
+             try:
+                 metadata = json.loads(row_dict.get("metadata_json")) if row_dict.get("metadata_json") else None
+             except (json.JSONDecodeError, TypeError):
+                 metadata = None
+
+             events.append(SecurityEvent(
+                 event_type=EventType(row_dict["event_type"]),
+                 tool_name=row_dict["tool_name"],
+                 command=row_dict.get("command"),
+                 tier=row_dict.get("tier"),
+                 pattern_name=row_dict.get("pattern_name"),
+                 pattern_severity=row_dict.get("pattern_severity"),
+                 decision=row_dict.get("decision"),
+                 decision_reason=row_dict.get("decision_reason"),
+                 user_response=row_dict.get("user_response"),
+                 metadata=metadata,
+                 session_id=row_dict.get("session_id"),
+                 working_directory=row_dict.get("working_directory"),
+                 correlation_id=row_dict.get("correlation_id"),
+                 source=row_dict.get("source"),
+             ))
+         return events
+
+     def delete_events(self, days: Optional[int] = None) -> int:
+         """Delete events from the database.
+
+         Args:
+             days: If provided, only delete events older than this many days.
+                 If None, delete all events.
+
+         Returns:
+             Number of events deleted
+         """
+         with self._get_connection() as conn:
+             if days is not None:
+                 cursor = conn.execute(
+                     "DELETE FROM security_events WHERE timestamp < datetime('now', ?)",
+                     (f'-{days} days',),
+                 )
+             else:
+                 cursor = conn.execute("DELETE FROM security_events")
+             return cursor.rowcount
+
+     def export_csv(self, filepath: Path, days: Optional[int] = None) -> int:
+         """Export events to CSV file.
+
+         Args:
+             filepath: Path to write CSV
+             days: Optional limit to recent N days
+
+         Returns:
+             Number of rows exported
+         """
+         import csv
+
+         query = "SELECT * FROM security_events"
+         params = []
+
+         if days:
+             query += " WHERE timestamp > datetime('now', ?)"
+             params.append(f'-{days} days')
+
+         query += " ORDER BY timestamp DESC"
+
+         with self._get_connection() as conn:
+             rows = conn.execute(query, params).fetchall()
+
+         if not rows:
+             return 0
+
+         with open(filepath, 'w', newline='') as f:
+             writer = csv.DictWriter(f, fieldnames=rows[0].keys())
+             writer.writeheader()
+             for row in rows:
+                 writer.writerow(dict(row))
+
+         return len(rows)
+
+
+ # Singleton instance for easy access
+ _logger: Optional[SecurityLogger] = None
+
+
+ def get_logger(redact_logs: bool = True) -> SecurityLogger:
+     """Get the singleton security logger instance.
+
+     Args:
+         redact_logs: Whether to enable log redaction (default True)
+
+     Returns:
+         SecurityLogger instance
+     """
+     global _logger
+     if _logger is None:
+         _logger = SecurityLogger(redact_logs=redact_logs)
+     return _logger
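
Reviewer note: the snippet below is a minimal usage sketch written for this review, based only on the module shown in the diff above; it is not part of the published package. It assumes the wheel is installed so that tweek.logging.security_log is importable, and running it would create the default ~/.tweek/security.db database. The secret-looking values are fabricated placeholders.

from tweek.logging.security_log import EventType, LogRedactor, SecurityEvent, get_logger

# Redaction on its own: bearer tokens and API-key assignments are masked in place.
redactor = LogRedactor()
print(redactor.redact_command(
    "curl -H 'Authorization: Bearer abc123def456ghi789jkl012' https://api.example.com"
))

# Logging a screening decision: the command is passed through redact_command()
# before it is written to SQLite (and to the NDJSON log when that is enabled).
logger = get_logger()
logger.log(SecurityEvent(
    event_type=EventType.BLOCKED,
    tool_name="Bash",
    command="export API_KEY=sk-example-0000000000000000",  # fake value for illustration
    decision="block",
    decision_reason="pattern match",
    source="hooks",
))
print(logger.get_stats(days=7))

Because get_logger() returns a process-wide singleton, repeated calls share one SecurityLogger and therefore one database connection path and redaction setting.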