tweek-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the differences between package versions as they appear in their respective public registries.
- tweek/__init__.py +16 -0
- tweek/cli.py +3390 -0
- tweek/cli_helpers.py +193 -0
- tweek/config/__init__.py +13 -0
- tweek/config/allowed_dirs.yaml +23 -0
- tweek/config/manager.py +1064 -0
- tweek/config/patterns.yaml +751 -0
- tweek/config/tiers.yaml +129 -0
- tweek/diagnostics.py +589 -0
- tweek/hooks/__init__.py +1 -0
- tweek/hooks/pre_tool_use.py +861 -0
- tweek/integrations/__init__.py +3 -0
- tweek/integrations/moltbot.py +243 -0
- tweek/licensing.py +398 -0
- tweek/logging/__init__.py +9 -0
- tweek/logging/bundle.py +350 -0
- tweek/logging/json_logger.py +150 -0
- tweek/logging/security_log.py +745 -0
- tweek/mcp/__init__.py +24 -0
- tweek/mcp/approval.py +456 -0
- tweek/mcp/approval_cli.py +356 -0
- tweek/mcp/clients/__init__.py +37 -0
- tweek/mcp/clients/chatgpt.py +112 -0
- tweek/mcp/clients/claude_desktop.py +203 -0
- tweek/mcp/clients/gemini.py +178 -0
- tweek/mcp/proxy.py +667 -0
- tweek/mcp/screening.py +175 -0
- tweek/mcp/server.py +317 -0
- tweek/platform/__init__.py +131 -0
- tweek/plugins/__init__.py +835 -0
- tweek/plugins/base.py +1080 -0
- tweek/plugins/compliance/__init__.py +30 -0
- tweek/plugins/compliance/gdpr.py +333 -0
- tweek/plugins/compliance/gov.py +324 -0
- tweek/plugins/compliance/hipaa.py +285 -0
- tweek/plugins/compliance/legal.py +322 -0
- tweek/plugins/compliance/pci.py +361 -0
- tweek/plugins/compliance/soc2.py +275 -0
- tweek/plugins/detectors/__init__.py +30 -0
- tweek/plugins/detectors/continue_dev.py +206 -0
- tweek/plugins/detectors/copilot.py +254 -0
- tweek/plugins/detectors/cursor.py +192 -0
- tweek/plugins/detectors/moltbot.py +205 -0
- tweek/plugins/detectors/windsurf.py +214 -0
- tweek/plugins/git_discovery.py +395 -0
- tweek/plugins/git_installer.py +491 -0
- tweek/plugins/git_lockfile.py +338 -0
- tweek/plugins/git_registry.py +503 -0
- tweek/plugins/git_security.py +482 -0
- tweek/plugins/providers/__init__.py +30 -0
- tweek/plugins/providers/anthropic.py +181 -0
- tweek/plugins/providers/azure_openai.py +289 -0
- tweek/plugins/providers/bedrock.py +248 -0
- tweek/plugins/providers/google.py +197 -0
- tweek/plugins/providers/openai.py +230 -0
- tweek/plugins/scope.py +130 -0
- tweek/plugins/screening/__init__.py +26 -0
- tweek/plugins/screening/llm_reviewer.py +149 -0
- tweek/plugins/screening/pattern_matcher.py +273 -0
- tweek/plugins/screening/rate_limiter.py +174 -0
- tweek/plugins/screening/session_analyzer.py +159 -0
- tweek/proxy/__init__.py +302 -0
- tweek/proxy/addon.py +223 -0
- tweek/proxy/interceptor.py +313 -0
- tweek/proxy/server.py +315 -0
- tweek/sandbox/__init__.py +71 -0
- tweek/sandbox/executor.py +382 -0
- tweek/sandbox/linux.py +278 -0
- tweek/sandbox/profile_generator.py +323 -0
- tweek/screening/__init__.py +13 -0
- tweek/screening/context.py +81 -0
- tweek/security/__init__.py +22 -0
- tweek/security/llm_reviewer.py +348 -0
- tweek/security/rate_limiter.py +682 -0
- tweek/security/secret_scanner.py +506 -0
- tweek/security/session_analyzer.py +600 -0
- tweek/vault/__init__.py +40 -0
- tweek/vault/cross_platform.py +251 -0
- tweek/vault/keychain.py +288 -0
- tweek-0.1.0.dist-info/METADATA +335 -0
- tweek-0.1.0.dist-info/RECORD +85 -0
- tweek-0.1.0.dist-info/WHEEL +5 -0
- tweek-0.1.0.dist-info/entry_points.txt +25 -0
- tweek-0.1.0.dist-info/licenses/LICENSE +190 -0
- tweek-0.1.0.dist-info/top_level.txt +1 -0
tweek/security/session_analyzer.py
ADDED
@@ -0,0 +1,600 @@
#!/usr/bin/env python3
"""
Tweek Session Analyzer

Detects cross-turn anomalies indicating conversation hijacking or persistent
prompt injection attacks.

Key detection patterns:
- Privilege escalation: Progressive access to more sensitive paths
- Repeated denial attacks: Retrying blocked operations with variations
- Behavior shift: Sudden change in command patterns
- Instruction persistence: Signs of injected instructions affecting multiple turns

Based on research showing 52.9% of RAG backdoor attacks and 82.4% of inter-agent
trust exploits succeed by persisting across conversation turns.
"""

import json
import re
import sqlite3
from collections import Counter
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from typing import Optional, List, Dict, Any, Set, Tuple

from tweek.logging.security_log import SecurityLogger, get_logger, EventType


class AnomalyType(Enum):
    """Types of session anomalies."""
    PRIVILEGE_ESCALATION = "privilege_escalation"
    PATH_ESCALATION = "path_escalation"
    REPEATED_DENIALS = "repeated_denials"
    BEHAVIOR_SHIFT = "behavior_shift"
    SUSPICIOUS_PATTERN = "suspicious_pattern"
    VELOCITY_CHANGE = "velocity_change"
    TIER_DRIFT = "tier_drift"
    CAPABILITY_AGGREGATION = "capability_aggregation"  # ACIP: multi-turn goal building
    GRADUATED_ESCALATION = "graduated_escalation"  # ACIP: 3+ denials triggers


@dataclass
class SessionAnalysis:
    """Result of session analysis."""
    session_id: str
    risk_score: float  # 0.0 - 1.0
    anomalies: List[AnomalyType] = field(default_factory=list)
    details: Dict[str, Any] = field(default_factory=dict)
    recommendations: List[str] = field(default_factory=list)

    @property
    def is_suspicious(self) -> bool:
        return self.risk_score >= 0.5 or len(self.anomalies) >= 2

    @property
    def is_high_risk(self) -> bool:
        return self.risk_score >= 0.75 or len(self.anomalies) >= 3


class SessionAnalyzer:
    """
    Analyzes session history to detect cross-turn anomalies.

    Uses pattern analysis across multiple conversation turns to detect
    attacks that would be missed by single-command analysis.
    """

    # Path sensitivity levels for escalation detection
    PATH_SENSITIVITY = {
        "safe": [r"/tmp/", r"/var/tmp/", r"\.cache/"],
        "medium": [r"/home/", r"~/", r"\.config/"],
        "high": [r"\.ssh/", r"\.aws/", r"\.kube/", r"\.gnupg/"],
        "critical": [r"id_rsa", r"id_ed25519", r"credentials", r"\.env$", r"secrets"],
    }

    # Patterns indicating persistent injection
    INJECTION_INDICATORS = [
        r"ignore\s+previous",
        r"you\s+are\s+now",
        r"from\s+now\s+on",
        r"always\s+do",
        r"for\s+all\s+future",
        r"remember\s+to\s+always",
    ]

    def __init__(
        self,
        logger: Optional[SecurityLogger] = None,
        lookback_minutes: int = 30
    ):
        """Initialize the session analyzer.

        Args:
            logger: Security logger for database access
            lookback_minutes: How far back to analyze
        """
        self.logger = logger or get_logger()
        self.lookback_minutes = lookback_minutes
        self._ensure_tables()

    def _ensure_tables(self):
        """Ensure session tracking tables exist."""
        try:
            with self.logger._get_connection() as conn:
                conn.executescript("""
                    -- Session profiles for tracking session-level metrics
                    CREATE TABLE IF NOT EXISTS session_profiles (
                        session_id TEXT PRIMARY KEY,
                        first_seen TEXT NOT NULL,
                        last_seen TEXT NOT NULL,
                        total_invocations INTEGER DEFAULT 0,
                        dangerous_count INTEGER DEFAULT 0,
                        denied_count INTEGER DEFAULT 0,
                        risk_score REAL DEFAULT 0.0,
                        anomaly_flags TEXT,  -- JSON array
                        metadata TEXT  -- JSON object
                    );

                    -- Index for time-based queries
                    CREATE INDEX IF NOT EXISTS idx_session_last_seen
                        ON session_profiles(last_seen);
                """)
        except Exception:
            pass

    def _get_session_events(
        self,
        conn: sqlite3.Connection,
        session_id: str
    ) -> List[Dict]:
        """Get recent events for a session."""
        query = """
            SELECT * FROM security_events
            WHERE session_id = ?
            AND timestamp > datetime('now', ?)
            ORDER BY timestamp ASC
        """
        rows = conn.execute(
            query,
            [session_id, f'-{self.lookback_minutes} minutes']
        ).fetchall()
        return [dict(row) for row in rows]

    def _get_path_sensitivity(self, path: str) -> str:
        """Determine sensitivity level of a path."""
        for level in ["critical", "high", "medium", "safe"]:
            patterns = self.PATH_SENSITIVITY.get(level, [])
            for pattern in patterns:
                if re.search(pattern, path, re.IGNORECASE):
                    return level
        return "unknown"

    def _extract_paths(self, events: List[Dict]) -> List[Tuple[str, str]]:
        """Extract paths from events with their timestamps."""
        paths = []
        for event in events:
            command = event.get("command", "")
            if not command:
                continue

            # Extract paths from commands
            path_patterns = [
                r'(?:cat|head|tail|less|more|read)\s+([^\s|>]+)',
                r'(?:ls|cd|find)\s+([^\s|>]+)',
                r'(?:cp|mv|rm)\s+[^\s]+\s+([^\s|>]+)',
                r'(?:chmod|chown)\s+[^\s]+\s+([^\s|>]+)',
            ]

            for pattern in path_patterns:
                matches = re.findall(pattern, command)
                for match in matches:
                    paths.append((event.get("timestamp", ""), match))

        return paths

    def _check_path_escalation(self, events: List[Dict]) -> Tuple[bool, Dict]:
        """Check for progressive access to more sensitive paths."""
        paths = self._extract_paths(events)
        if len(paths) < 3:
            return False, {}

        sensitivity_order = {"safe": 0, "medium": 1, "high": 2, "critical": 3, "unknown": 0}
        sensitivity_sequence = []

        for timestamp, path in paths:
            level = self._get_path_sensitivity(path)
            sensitivity_sequence.append((timestamp, path, level))

        # Check for escalation pattern
        max_seen = 0
        escalation_detected = False
        escalation_path = []

        for ts, path, level in sensitivity_sequence:
            level_num = sensitivity_order.get(level, 0)
            if level_num > max_seen:
                if max_seen > 0:  # Not first access
                    escalation_detected = True
                max_seen = level_num
                escalation_path.append({"path": path, "level": level})

        return escalation_detected, {
            "max_sensitivity": list(sensitivity_order.keys())[max_seen] if max_seen else "safe",
            "escalation_path": escalation_path[-5:] if escalation_path else []
        }

    def _check_repeated_denials(self, events: List[Dict]) -> Tuple[bool, Dict]:
        """Check for repeated attempts after denials."""
        denials = [e for e in events if e.get("decision") in ("block", "ask")]
        if len(denials) < 2:
            return False, {}

        # Group denials by pattern/tool
        denial_patterns = Counter()
        for denial in denials:
            key = (denial.get("tool_name"), denial.get("pattern_name"))
            denial_patterns[key] += 1

        # Check for repeated attempts
        repeated = [(k, v) for k, v in denial_patterns.items() if v >= 2]

        if repeated:
            return True, {
                "repeated_denials": [
                    {"tool": k[0], "pattern": k[1], "count": v}
                    for k, v in repeated
                ],
                "total_denials": len(denials)
            }

        return False, {"total_denials": len(denials)}

    def _check_behavior_shift(self, events: List[Dict]) -> Tuple[bool, Dict]:
        """Check for sudden changes in behavior patterns."""
        if len(events) < 10:
            return False, {}

        # Split into first half and second half
        mid = len(events) // 2
        first_half = events[:mid]
        second_half = events[mid:]

        # Compare tool usage distribution
        first_tools = Counter(e.get("tool_name") for e in first_half)
        second_tools = Counter(e.get("tool_name") for e in second_half)

        # Calculate Jaccard distance
        all_tools = set(first_tools.keys()) | set(second_tools.keys())
        common_tools = set(first_tools.keys()) & set(second_tools.keys())

        if len(all_tools) == 0:
            return False, {}

        jaccard = len(common_tools) / len(all_tools)
        behavior_shift = jaccard < 0.5  # Less than 50% overlap

        # Compare tier distribution
        first_tiers = Counter(e.get("tier") for e in first_half)
        second_tiers = Counter(e.get("tier") for e in second_half)

        # Check for dangerous tier increase
        first_dangerous = first_tiers.get("dangerous", 0) / max(len(first_half), 1)
        second_dangerous = second_tiers.get("dangerous", 0) / max(len(second_half), 1)
        tier_shift = second_dangerous > first_dangerous * 2  # 2x increase

        return (behavior_shift or tier_shift), {
            "tool_overlap": round(jaccard, 2),
            "first_half_tools": dict(first_tools),
            "second_half_tools": dict(second_tools),
            "dangerous_ratio_change": {
                "first": round(first_dangerous, 2),
                "second": round(second_dangerous, 2)
            }
        }

    def _check_injection_indicators(self, events: List[Dict]) -> Tuple[bool, Dict]:
        """Check for signs of persistent prompt injection."""
        injection_matches = []

        for event in events:
            command = event.get("command", "") or ""
            metadata = event.get("metadata_json")
            if metadata:
                try:
                    meta = json.loads(metadata)
                    tool_input = meta.get("tool_input", {})
                    if isinstance(tool_input, dict):
                        command += " " + str(tool_input)
                except json.JSONDecodeError:
                    pass

            for pattern in self.INJECTION_INDICATORS:
                if re.search(pattern, command, re.IGNORECASE):
                    injection_matches.append({
                        "timestamp": event.get("timestamp"),
                        "pattern": pattern,
                        "tool": event.get("tool_name")
                    })

        if injection_matches:
            return True, {
                "injection_indicators": injection_matches[:5],
                "total_matches": len(injection_matches)
            }

        return False, {}

    def _check_graduated_escalation(self, events: List[Dict]) -> Tuple[bool, Dict]:
        """
        Check for ACIP-style graduated escalation trigger.

        After 3+ denied/blocked attempts in a session, trigger elevated scrutiny.
        This implements ACIP's "graduated response posture" concept.
        """
        denials = [e for e in events if e.get("decision") in ("block", "ask")]
        denial_count = len(denials)

        # ACIP threshold: 3+ refused attempts triggers escalation
        if denial_count >= 3:
            return True, {
                "denial_count": denial_count,
                "threshold": 3,
                "message": "ACIP graduated escalation: 3+ blocked attempts in session"
            }

        return False, {"denial_count": denial_count}

    def _check_capability_aggregation(self, events: List[Dict]) -> Tuple[bool, Dict]:
        """
        Check for ACIP-style capability aggregation (drip attack).

        Detects patterns where information is gathered incrementally
        across multiple turns toward a potentially harmful goal.
        """
        # Track progression of sensitive path access
        sensitive_accesses = []
        for event in events:
            command = event.get("command", "") or ""
            # Track access to security-relevant paths
            if any(p in command.lower() for p in [
                ".ssh", ".aws", ".env", "credentials", "password",
                "secret", "token", "key", "config"
            ]):
                sensitive_accesses.append({
                    "timestamp": event.get("timestamp"),
                    "command": command[:100]
                })

        # If accessing multiple different sensitive areas, flag as aggregation
        unique_areas = set()
        for access in sensitive_accesses:
            cmd = access.get("command", "").lower()
            for area in [".ssh", ".aws", ".env", "credentials", "password", "token", "key"]:
                if area in cmd:
                    unique_areas.add(area)

        if len(unique_areas) >= 3:
            return True, {
                "unique_sensitive_areas": list(unique_areas),
                "total_sensitive_accesses": len(sensitive_accesses),
                "message": "ACIP capability aggregation: accessing multiple sensitive areas"
            }

        return False, {"unique_sensitive_areas": list(unique_areas)}

    def _calculate_risk_score(
        self,
        anomalies: List[AnomalyType],
        events: List[Dict],
        details: Dict[str, Any]
    ) -> float:
        """Calculate overall risk score for the session."""
        score = 0.0

        # Base score from anomaly count
        anomaly_weights = {
            AnomalyType.PRIVILEGE_ESCALATION: 0.3,
            AnomalyType.PATH_ESCALATION: 0.25,
            AnomalyType.REPEATED_DENIALS: 0.2,
            AnomalyType.BEHAVIOR_SHIFT: 0.15,
            AnomalyType.SUSPICIOUS_PATTERN: 0.25,
            AnomalyType.VELOCITY_CHANGE: 0.1,
            AnomalyType.TIER_DRIFT: 0.15,
            AnomalyType.CAPABILITY_AGGREGATION: 0.3,  # ACIP
            AnomalyType.GRADUATED_ESCALATION: 0.25,  # ACIP
        }

        for anomaly in anomalies:
            score += anomaly_weights.get(anomaly, 0.1)

        # Adjust based on denial ratio
        denials = len([e for e in events if e.get("decision") in ("block", "ask")])
        total = len(events)
        if total > 0:
            denial_ratio = denials / total
            if denial_ratio > 0.3:
                score += 0.1

        # Adjust based on dangerous command ratio
        dangerous = len([e for e in events if e.get("tier") == "dangerous"])
        if total > 0 and (dangerous / total) > 0.5:
            score += 0.1

        # Cap at 1.0
        return min(score, 1.0)

    def analyze(self, session_id: str) -> SessionAnalysis:
        """
        Analyze a session for cross-turn anomalies.

        Session analysis is free and open source.

        Args:
            session_id: Session to analyze

        Returns:
            SessionAnalysis with risk score and detected anomalies
        """
        if not session_id:
            return SessionAnalysis(
                session_id="unknown",
                risk_score=0.0,
                details={"error": "No session ID provided"}
            )

        try:
            with self.logger._get_connection() as conn:
                events = self._get_session_events(conn, session_id)

                if len(events) < 3:
                    return SessionAnalysis(
                        session_id=session_id,
                        risk_score=0.0,
                        details={"message": "Insufficient events for analysis"}
                    )

                anomalies = []
                all_details = {"total_events": len(events)}

                # Check for path escalation
                path_esc, path_details = self._check_path_escalation(events)
                if path_esc:
                    anomalies.append(AnomalyType.PATH_ESCALATION)
                    all_details["path_analysis"] = path_details

                # Check for repeated denials
                repeated, denial_details = self._check_repeated_denials(events)
                if repeated:
                    anomalies.append(AnomalyType.REPEATED_DENIALS)
                    all_details["denial_analysis"] = denial_details

                # Check for behavior shift
                behavior_shift, behavior_details = self._check_behavior_shift(events)
                if behavior_shift:
                    anomalies.append(AnomalyType.BEHAVIOR_SHIFT)
                    all_details["behavior_analysis"] = behavior_details

                # Check for injection indicators
                injection, injection_details = self._check_injection_indicators(events)
                if injection:
                    anomalies.append(AnomalyType.SUSPICIOUS_PATTERN)
                    all_details["injection_analysis"] = injection_details

                # ACIP: Check for graduated escalation (3+ denials)
                graduated, graduated_details = self._check_graduated_escalation(events)
                if graduated:
                    anomalies.append(AnomalyType.GRADUATED_ESCALATION)
                    all_details["graduated_escalation"] = graduated_details

                # ACIP: Check for capability aggregation
                aggregation, aggregation_details = self._check_capability_aggregation(events)
                if aggregation:
                    anomalies.append(AnomalyType.CAPABILITY_AGGREGATION)
                    all_details["capability_aggregation"] = aggregation_details

                # Calculate risk score
                risk_score = self._calculate_risk_score(anomalies, events, all_details)

                # Generate recommendations
                recommendations = []
                if AnomalyType.PATH_ESCALATION in anomalies:
                    recommendations.append(
                        "Progressive access to sensitive paths detected. "
                        "Review recent file access patterns."
                    )
                if AnomalyType.REPEATED_DENIALS in anomalies:
                    recommendations.append(
                        "Multiple blocked operations may indicate attack attempts. "
                        "Consider ending session."
                    )
                if AnomalyType.BEHAVIOR_SHIFT in anomalies:
                    recommendations.append(
                        "Significant behavior change detected mid-session. "
                        "Verify current task context."
                    )
                if AnomalyType.SUSPICIOUS_PATTERN in anomalies:
                    recommendations.append(
                        "Prompt injection indicators detected. "
                        "Session may be compromised."
                    )
                if AnomalyType.GRADUATED_ESCALATION in anomalies:
                    recommendations.append(
                        "ACIP graduated response: 3+ blocked attempts. "
                        "Applying elevated scrutiny to all operations."
                    )
                if AnomalyType.CAPABILITY_AGGREGATION in anomalies:
                    recommendations.append(
                        "ACIP capability aggregation: Accessing multiple sensitive areas. "
                        "Possible drip attack in progress."
                    )

                # Update session profile
                self._update_session_profile(conn, session_id, events, risk_score, anomalies)

                return SessionAnalysis(
                    session_id=session_id,
                    risk_score=risk_score,
                    anomalies=anomalies,
                    details=all_details,
                    recommendations=recommendations
                )

        except Exception as e:
            return SessionAnalysis(
                session_id=session_id,
                risk_score=0.0,
                details={"error": str(e)}
            )

    def _update_session_profile(
        self,
        conn: sqlite3.Connection,
        session_id: str,
        events: List[Dict],
        risk_score: float,
        anomalies: List[AnomalyType]
    ):
        """Update the session profile table."""
        try:
            now = datetime.now().isoformat()
            dangerous_count = len([e for e in events if e.get("tier") == "dangerous"])
            denied_count = len([e for e in events if e.get("decision") in ("block", "ask")])
            anomaly_flags = json.dumps([a.value for a in anomalies])

            conn.execute("""
                INSERT INTO session_profiles (
                    session_id, first_seen, last_seen, total_invocations,
                    dangerous_count, denied_count, risk_score, anomaly_flags
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                ON CONFLICT(session_id) DO UPDATE SET
                    last_seen = excluded.last_seen,
                    total_invocations = excluded.total_invocations,
                    dangerous_count = excluded.dangerous_count,
                    denied_count = excluded.denied_count,
                    risk_score = excluded.risk_score,
                    anomaly_flags = excluded.anomaly_flags
            """, (
                session_id, now, now, len(events),
                dangerous_count, denied_count, risk_score, anomaly_flags
            ))
        except Exception:
            pass

    def format_analysis_message(self, analysis: SessionAnalysis) -> str:
        """Format a user-friendly analysis message."""
        if not analysis.is_suspicious:
            return ""

        lines = [
            "Session Analysis Alert",
            "=" * 45,
            f"Risk Score: {analysis.risk_score:.0%}",
        ]

        if analysis.anomalies:
            lines.append("\nDetected Anomalies:")
            for anomaly in analysis.anomalies:
                lines.append(f"  - {anomaly.value.replace('_', ' ').title()}")

        if analysis.recommendations:
            lines.append("\nRecommendations:")
            for rec in analysis.recommendations:
                lines.append(f"  - {rec}")

        lines.append("=" * 45)
        return "\n".join(lines)


# Singleton instance
_session_analyzer: Optional[SessionAnalyzer] = None


def get_session_analyzer() -> SessionAnalyzer:
    """Get the singleton session analyzer instance."""
    global _session_analyzer
    if _session_analyzer is None:
        _session_analyzer = SessionAnalyzer()
    return _session_analyzer
tweek/vault/__init__.py
ADDED
@@ -0,0 +1,40 @@
"""
Tweek Vault - Cross-platform secure credential storage.

Uses the keyring library which provides:
- macOS: Keychain
- Linux: Secret Service (GNOME Keyring, KWallet)
- Windows: Windows Credential Locker
"""

from tweek.platform import PLATFORM, Platform

# Try to use cross-platform vault first
try:
    from .cross_platform import CrossPlatformVault, get_vault, migrate_env_to_vault
    VAULT_AVAILABLE = True
    VAULT_TYPE = "cross-platform"
except ImportError:
    # Fall back to macOS-only keychain if keyring not installed
    VAULT_AVAILABLE = False
    VAULT_TYPE = None
    CrossPlatformVault = None
    get_vault = None
    migrate_env_to_vault = None

# Keep old imports for backwards compatibility on macOS
try:
    from .keychain import KeychainVault, VaultError
except ImportError:
    KeychainVault = None
    VaultError = Exception

__all__ = [
    "CrossPlatformVault",
    "KeychainVault",
    "VaultError",
    "get_vault",
    "migrate_env_to_vault",
    "VAULT_AVAILABLE",
    "VAULT_TYPE",
]
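
A short consumption sketch (again illustrative, not shipped in the wheel) showing the feature-detection pattern this __init__.py enables: importing never raises, so callers branch on the module-level flags. CrossPlatformVault's and KeychainVault's methods are not visible in this diff, so none are called here.

# Illustrative sketch, not part of tweek 0.1.0.
from tweek.vault import VAULT_AVAILABLE, VAULT_TYPE, KeychainVault, get_vault

if VAULT_AVAILABLE:
    vault = get_vault()  # keyring-backed, cross-platform store
    print(f"Vault backend: {VAULT_TYPE}")
elif KeychainVault is not None:
    # keyring is missing but the macOS-only fallback imported; its constructor
    # signature is not shown in this diff, so instantiation is left to the caller
    print("Falling back to macOS KeychainVault")
else:
    print("No secure credential storage available")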