tweek 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. tweek/__init__.py +16 -0
  2. tweek/cli.py +3390 -0
  3. tweek/cli_helpers.py +193 -0
  4. tweek/config/__init__.py +13 -0
  5. tweek/config/allowed_dirs.yaml +23 -0
  6. tweek/config/manager.py +1064 -0
  7. tweek/config/patterns.yaml +751 -0
  8. tweek/config/tiers.yaml +129 -0
  9. tweek/diagnostics.py +589 -0
  10. tweek/hooks/__init__.py +1 -0
  11. tweek/hooks/pre_tool_use.py +861 -0
  12. tweek/integrations/__init__.py +3 -0
  13. tweek/integrations/moltbot.py +243 -0
  14. tweek/licensing.py +398 -0
  15. tweek/logging/__init__.py +9 -0
  16. tweek/logging/bundle.py +350 -0
  17. tweek/logging/json_logger.py +150 -0
  18. tweek/logging/security_log.py +745 -0
  19. tweek/mcp/__init__.py +24 -0
  20. tweek/mcp/approval.py +456 -0
  21. tweek/mcp/approval_cli.py +356 -0
  22. tweek/mcp/clients/__init__.py +37 -0
  23. tweek/mcp/clients/chatgpt.py +112 -0
  24. tweek/mcp/clients/claude_desktop.py +203 -0
  25. tweek/mcp/clients/gemini.py +178 -0
  26. tweek/mcp/proxy.py +667 -0
  27. tweek/mcp/screening.py +175 -0
  28. tweek/mcp/server.py +317 -0
  29. tweek/platform/__init__.py +131 -0
  30. tweek/plugins/__init__.py +835 -0
  31. tweek/plugins/base.py +1080 -0
  32. tweek/plugins/compliance/__init__.py +30 -0
  33. tweek/plugins/compliance/gdpr.py +333 -0
  34. tweek/plugins/compliance/gov.py +324 -0
  35. tweek/plugins/compliance/hipaa.py +285 -0
  36. tweek/plugins/compliance/legal.py +322 -0
  37. tweek/plugins/compliance/pci.py +361 -0
  38. tweek/plugins/compliance/soc2.py +275 -0
  39. tweek/plugins/detectors/__init__.py +30 -0
  40. tweek/plugins/detectors/continue_dev.py +206 -0
  41. tweek/plugins/detectors/copilot.py +254 -0
  42. tweek/plugins/detectors/cursor.py +192 -0
  43. tweek/plugins/detectors/moltbot.py +205 -0
  44. tweek/plugins/detectors/windsurf.py +214 -0
  45. tweek/plugins/git_discovery.py +395 -0
  46. tweek/plugins/git_installer.py +491 -0
  47. tweek/plugins/git_lockfile.py +338 -0
  48. tweek/plugins/git_registry.py +503 -0
  49. tweek/plugins/git_security.py +482 -0
  50. tweek/plugins/providers/__init__.py +30 -0
  51. tweek/plugins/providers/anthropic.py +181 -0
  52. tweek/plugins/providers/azure_openai.py +289 -0
  53. tweek/plugins/providers/bedrock.py +248 -0
  54. tweek/plugins/providers/google.py +197 -0
  55. tweek/plugins/providers/openai.py +230 -0
  56. tweek/plugins/scope.py +130 -0
  57. tweek/plugins/screening/__init__.py +26 -0
  58. tweek/plugins/screening/llm_reviewer.py +149 -0
  59. tweek/plugins/screening/pattern_matcher.py +273 -0
  60. tweek/plugins/screening/rate_limiter.py +174 -0
  61. tweek/plugins/screening/session_analyzer.py +159 -0
  62. tweek/proxy/__init__.py +302 -0
  63. tweek/proxy/addon.py +223 -0
  64. tweek/proxy/interceptor.py +313 -0
  65. tweek/proxy/server.py +315 -0
  66. tweek/sandbox/__init__.py +71 -0
  67. tweek/sandbox/executor.py +382 -0
  68. tweek/sandbox/linux.py +278 -0
  69. tweek/sandbox/profile_generator.py +323 -0
  70. tweek/screening/__init__.py +13 -0
  71. tweek/screening/context.py +81 -0
  72. tweek/security/__init__.py +22 -0
  73. tweek/security/llm_reviewer.py +348 -0
  74. tweek/security/rate_limiter.py +682 -0
  75. tweek/security/secret_scanner.py +506 -0
  76. tweek/security/session_analyzer.py +600 -0
  77. tweek/vault/__init__.py +40 -0
  78. tweek/vault/cross_platform.py +251 -0
  79. tweek/vault/keychain.py +288 -0
  80. tweek-0.1.0.dist-info/METADATA +335 -0
  81. tweek-0.1.0.dist-info/RECORD +85 -0
  82. tweek-0.1.0.dist-info/WHEEL +5 -0
  83. tweek-0.1.0.dist-info/entry_points.txt +25 -0
  84. tweek-0.1.0.dist-info/licenses/LICENSE +190 -0
  85. tweek-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,149 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Tweek LLM Reviewer Screening Plugin
4
+
5
+ Semantic analysis using LLM for risky/dangerous operations:
6
+ - Sensitive path access detection
7
+ - Data exfiltration potential
8
+ - System configuration changes
9
+ - Prompt injection indicators
10
+ - Privilege escalation attempts
11
+
12
+ Free and open source. Requires ANTHROPIC_API_KEY (BYOK).
13
+ """
14
+
15
+ from typing import Optional, Dict, Any, List
16
+ from tweek.plugins.base import (
17
+ ScreeningPlugin,
18
+ ScreeningResult,
19
+ Finding,
20
+ Severity,
21
+ ActionType,
22
+ )
23
+
24
+
25
class LLMReviewerPlugin(ScreeningPlugin):
    """
    LLM-based security reviewer plugin.

    Delegates semantic analysis to a fast, cheap LLM (Claude Haiku) for
    commands that pass regex screening but may still be malicious.

    Free and open source. Requires ANTHROPIC_API_KEY (BYOK).
    """

    VERSION = "1.0.0"
    DESCRIPTION = "Semantic security analysis using LLM"
    AUTHOR = "Tweek"
    REQUIRES_LICENSE = "free"
    TAGS = ["screening", "llm", "semantic-analysis"]

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        super().__init__(config)
        # Underlying reviewer is constructed on first use (see _get_reviewer).
        self._reviewer = None

    @property
    def name(self) -> str:
        return "llm_reviewer"

    def _get_reviewer(self):
        """Lazily construct the LLMReviewer; returns None if unavailable."""
        if self._reviewer is not None:
            return self._reviewer

        try:
            from tweek.security.llm_reviewer import LLMReviewer
        except ImportError:
            # Optional dependency missing — screening degrades to allow-all.
            return None

        self._reviewer = LLMReviewer(
            model=self._config.get("model", "claude-3-5-haiku-latest"),
            api_key=self._config.get("api_key"),
            timeout=self._config.get("timeout", 5.0),
            enabled=self._config.get("enabled", True),
        )
        return self._reviewer

    def screen(
        self,
        tool_name: str,
        content: str,
        context: Dict[str, Any]
    ) -> ScreeningResult:
        """
        Screen content using LLM semantic analysis.

        Args:
            tool_name: Name of the tool being invoked
            content: Command or content to analyze
            context: Should include 'tier', optionally 'tool_input', 'session_id'

        Returns:
            ScreeningResult with LLM analysis
        """
        reviewer = self._get_reviewer()
        if reviewer is None or not reviewer.enabled:
            return ScreeningResult(
                allowed=True,
                plugin_name=self.name,
                reason="LLM reviewer not available or disabled",
            )

        session_id = context.get("session_id")
        review = reviewer.review(
            command=content,
            tool=tool_name,
            tier=context.get("tier", "default"),
            tool_input=context.get("tool_input"),
            session_context=f"session:{session_id}" if session_id else None,
        )

        # Map the reviewer's RiskLevel onto (Severity, risk-label) pairs;
        # anything unrecognized is treated as suspicious/medium.
        from tweek.security.llm_reviewer import RiskLevel

        level_table = {
            RiskLevel.SAFE: (Severity.LOW, "safe"),
            RiskLevel.SUSPICIOUS: (Severity.MEDIUM, "suspicious"),
            RiskLevel.DANGEROUS: (Severity.HIGH, "dangerous"),
        }
        severity, risk_level = level_table.get(
            review.risk_level, (Severity.MEDIUM, "suspicious")
        )

        findings: List[Finding] = []
        if review.is_suspicious:
            findings.append(Finding(
                pattern_name="llm_review",
                matched_text=content[:100],
                severity=severity,
                description=review.reason,
                recommended_action=ActionType.ASK if review.should_prompt else ActionType.WARN,
                metadata={
                    "confidence": review.confidence,
                    "model": self._config.get("model", "claude-3-5-haiku-latest"),
                },
            ))

        return ScreeningResult(
            allowed=not review.is_dangerous,
            plugin_name=self.name,
            reason=review.reason,
            risk_level=risk_level,
            confidence=review.confidence,
            should_prompt=review.should_prompt,
            findings=findings,
            details=review.details,
        )

    def is_available(self) -> bool:
        """Check if LLM reviewer is available and configured."""
        reviewer = self._get_reviewer()
        return reviewer is not None and reviewer.enabled
@@ -0,0 +1,273 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Tweek Pattern Matcher Screening Plugin
4
+
5
+ Regex-based pattern matching for known attack vectors:
6
+ - Credential access patterns
7
+ - Data exfiltration patterns
8
+ - Prompt injection patterns
9
+ - Privilege escalation patterns
10
+
11
+ FREE feature - available to all users.
12
+ """
13
+
14
+ import re
15
+ from pathlib import Path
16
+ from typing import Optional, Dict, Any, List
17
+
18
+ import yaml
19
+
20
+ from tweek.plugins.base import (
21
+ ScreeningPlugin,
22
+ ScreeningResult,
23
+ Finding,
24
+ Severity,
25
+ ActionType,
26
+ )
27
+
28
+
29
class PatternMatcherPlugin(ScreeningPlugin):
    """
    Pattern matcher screening plugin.

    Matches content against known attack patterns using regex.
    Patterns are loaded from YAML configuration files.

    FREE feature - all patterns available to all users.
    """

    VERSION = "1.0.0"
    DESCRIPTION = "Regex-based pattern matching for known attack vectors"
    AUTHOR = "Tweek"
    REQUIRES_LICENSE = "free"
    TAGS = ["screening", "pattern-matching", "regex"]

    # Lookup tables hoisted to class level so they are built once, not
    # rebuilt inside the match loop on every hit.
    _SEVERITY_MAP = {
        "critical": Severity.CRITICAL,
        "high": Severity.HIGH,
        "medium": Severity.MEDIUM,
        "low": Severity.LOW,
    }
    _ACTION_MAP = {
        "block": ActionType.BLOCK,
        "warn": ActionType.WARN,
        "ask": ActionType.ASK,
        "allow": ActionType.ALLOW,
    }

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        super().__init__(config)
        # Pattern definitions, loaded lazily from YAML (None = not loaded yet).
        self._patterns: Optional[List[Dict]] = None
        # Compiled-regex cache; a None value marks a pattern that failed to
        # compile, so it is skipped without retrying. (Annotation fixed to
        # Optional[re.Pattern] — the original claimed re.Pattern but stored None.)
        self._compiled: Dict[str, Optional[re.Pattern]] = {}

    @property
    def name(self) -> str:
        return "pattern_matcher"

    def _load_patterns(self) -> List[Dict]:
        """Load patterns from configuration files (cached after first call).

        Resolution order: explicit 'patterns_path' config entry, then the
        user's ~/.tweek/patterns/patterns.yaml, then the bundled
        config/patterns.yaml shipped with the package.
        """
        if self._patterns is not None:
            return self._patterns

        self._patterns = []

        # Try user patterns first
        user_patterns = Path.home() / ".tweek" / "patterns" / "patterns.yaml"
        bundled_patterns = Path(__file__).parent.parent.parent / "config" / "patterns.yaml"

        patterns_path = None
        if self._config.get("patterns_path"):
            patterns_path = Path(self._config["patterns_path"])
        elif user_patterns.exists():
            patterns_path = user_patterns
        elif bundled_patterns.exists():
            patterns_path = bundled_patterns

        if patterns_path and patterns_path.exists():
            try:
                with open(patterns_path) as f:
                    data = yaml.safe_load(f) or {}
                self._patterns = data.get("patterns", [])
            except (yaml.YAMLError, IOError):
                # Best-effort: an unreadable/invalid file leaves the pattern
                # list empty rather than failing the screening pipeline.
                pass

        return self._patterns

    def _get_compiled(self, pattern: str) -> Optional[re.Pattern]:
        """Get or compile a regex pattern; None if it does not compile."""
        if pattern not in self._compiled:
            try:
                self._compiled[pattern] = re.compile(pattern, re.IGNORECASE)
            except re.error:
                self._compiled[pattern] = None
        return self._compiled[pattern]

    def screen(
        self,
        tool_name: str,
        content: str,
        context: Dict[str, Any]
    ) -> ScreeningResult:
        """
        Screen content against known attack patterns.

        Args:
            tool_name: Name of the tool being invoked
            content: Command or content to screen
            context: Additional context (unused for pattern matching)

        Returns:
            ScreeningResult with pattern match findings
        """
        findings = []

        for pattern_def in self._load_patterns():
            regex = pattern_def.get("regex", "")
            if not regex:
                continue

            compiled = self._get_compiled(regex)
            if compiled is None:
                continue

            match = compiled.search(content)
            if not match:
                continue

            severity = self._SEVERITY_MAP.get(
                pattern_def.get("severity", "medium"),
                Severity.MEDIUM
            )
            action = self._ACTION_MAP.get(
                pattern_def.get("action", "ask"),
                ActionType.ASK
            )

            findings.append(Finding(
                pattern_name=pattern_def.get("name", "unknown"),
                matched_text=match.group()[:100],  # Truncate
                severity=severity,
                description=pattern_def.get("description", ""),
                context=self._get_context(content, match.start()),
                recommended_action=action,
                metadata={
                    "pattern_id": pattern_def.get("id"),
                    "category": pattern_def.get("category"),
                    "tags": pattern_def.get("tags", []),
                }
            ))

        if not findings:
            return ScreeningResult(
                allowed=True,
                plugin_name=self.name,
                risk_level="safe",
            )

        # Determine overall action/severity: the "largest" according to
        # these orderings wins. Orderings computed once, not per finding.
        action_priority = [
            ActionType.ALLOW,
            ActionType.WARN,
            ActionType.ASK,
            ActionType.REDACT,
            ActionType.BLOCK,
        ]
        severity_order = list(Severity)

        max_action = max(
            (f.recommended_action for f in findings),
            key=action_priority.index,
        )
        max_severity = max(
            (f.severity for f in findings),
            key=severity_order.index,
        )

        # Determine risk level from the worst severity seen.
        if max_severity in (Severity.HIGH, Severity.CRITICAL):
            risk_level = "dangerous"
        elif max_severity == Severity.MEDIUM:
            risk_level = "suspicious"
        else:
            risk_level = "safe"

        # Should prompt if action is ASK or BLOCK
        should_prompt = max_action in (ActionType.ASK, ActionType.BLOCK)

        return ScreeningResult(
            allowed=max_action is not ActionType.BLOCK,
            plugin_name=self.name,
            reason=f"Matched {len(findings)} pattern(s): {', '.join(f.pattern_name for f in findings[:3])}",
            risk_level=risk_level,
            should_prompt=should_prompt,
            findings=findings,
            details={
                "pattern_count": len(findings),
                "max_severity": max_severity.value,
            }
        )

    def _get_context(self, content: str, position: int, chars: int = 40) -> str:
        """Get surrounding context for a match (ellipsized on clipped ends)."""
        start = max(0, position - chars)
        end = min(len(content), position + chars)

        prefix = "..." if start > 0 else ""
        suffix = "..." if end < len(content) else ""

        return f"{prefix}{content[start:end]}{suffix}"

    def check(self, content: str) -> Optional[Dict]:
        """
        Simple check returning first matching pattern.

        For compatibility with existing code.
        """
        for pattern_def in self._load_patterns():
            regex = pattern_def.get("regex", "")
            if not regex:
                continue

            compiled = self._get_compiled(regex)
            if compiled and compiled.search(content):
                return pattern_def

        return None

    def check_all(self, content: str) -> List[Dict]:
        """
        Check content against all patterns, returning all matches.

        For compatibility with existing code.
        """
        matches = []

        for pattern_def in self._load_patterns():
            regex = pattern_def.get("regex", "")
            if not regex:
                continue

            compiled = self._get_compiled(regex)
            if compiled and compiled.search(content):
                matches.append(pattern_def)

        return matches

    def get_pattern_count(self) -> int:
        """Get total number of loaded patterns."""
        return len(self._load_patterns())

    def get_patterns_by_category(self) -> Dict[str, List[Dict]]:
        """Get patterns grouped by category ('uncategorized' if unset)."""
        by_category: Dict[str, List[Dict]] = {}

        for pattern in self._load_patterns():
            category = pattern.get("category", "uncategorized")
            by_category.setdefault(category, []).append(pattern)

        return by_category
@@ -0,0 +1,174 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Tweek Rate Limiter Screening Plugin
4
+
5
+ Wraps the rate limiter security module as a screening plugin.
6
+ Detects resource theft attacks and abuse patterns:
7
+ - Burst detection
8
+ - Repeated command detection
9
+ - Velocity anomalies
10
+ - Circuit breaker pattern
11
+ """
12
+
13
+ from typing import Optional, Dict, Any, List
14
+ from tweek.plugins.base import (
15
+ ScreeningPlugin,
16
+ ScreeningResult,
17
+ Finding,
18
+ Severity,
19
+ ActionType,
20
+ )
21
+
22
+
23
class RateLimiterPlugin(ScreeningPlugin):
    """
    Rate limiter screening plugin.

    Detects patterns indicating resource theft or automated abuse:
    - Burst patterns (many commands in short window)
    - Repeated identical commands
    - Unusual velocity changes
    - Dangerous tier spikes

    Free and open source.
    """

    VERSION = "1.0.0"
    DESCRIPTION = "Detect resource theft and abuse patterns via rate limiting"
    AUTHOR = "Tweek"
    REQUIRES_LICENSE = "free"
    TAGS = ["screening", "rate-limiting", "abuse-detection"]

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        super().__init__(config)
        # Backing RateLimiter, created on demand (see _get_rate_limiter).
        self._rate_limiter = None

    @property
    def name(self) -> str:
        return "rate_limiter"

    def _get_rate_limiter(self):
        """Lazily build the RateLimiter from plugin config; None if unavailable."""
        if self._rate_limiter is not None:
            return self._rate_limiter

        try:
            from tweek.security.rate_limiter import (
                RateLimiter,
                RateLimitConfig,
                CircuitBreakerConfig,
            )
        except ImportError:
            # Security module missing — plugin degrades to allow-all.
            return None

        cfg = self._config.get
        self._rate_limiter = RateLimiter(
            config=RateLimitConfig(
                burst_window=cfg("burst_window", 5),
                burst_threshold=cfg("burst_threshold", 15),
                max_per_minute=cfg("max_per_minute", 60),
                max_dangerous_per_minute=cfg("max_dangerous_per_minute", 10),
                max_same_command=cfg("max_same_command", 5),
                velocity_multiplier=cfg("velocity_multiplier", 3.0),
            ),
            circuit_config=CircuitBreakerConfig(
                failure_threshold=cfg("circuit_failure_threshold", 5),
                open_timeout=cfg("circuit_open_timeout", 60),
            ),
        )
        return self._rate_limiter

    def screen(
        self,
        tool_name: str,
        content: str,
        context: Dict[str, Any]
    ) -> ScreeningResult:
        """
        Screen for rate limit violations.

        Args:
            tool_name: Name of the tool being invoked
            content: Command or content
            context: Must include 'session_id', optionally 'tier'

        Returns:
            ScreeningResult with rate limit decision
        """
        limiter = self._get_rate_limiter()
        if limiter is None:
            return ScreeningResult(
                allowed=True,
                plugin_name=self.name,
                reason="Rate limiter not available",
            )

        # Only Bash invocations carry a shell command worth tracking.
        result = limiter.check(
            tool_name=tool_name,
            command=content if tool_name == "Bash" else None,
            session_id=context.get("session_id"),
            tier=context.get("tier"),
        )

        if result.allowed:
            return ScreeningResult(
                allowed=True,
                plugin_name=self.name,
                risk_level="safe",
                details=result.details,
            )

        # Translate each violation into a Finding with a mapped severity.
        from tweek.security.rate_limiter import RateLimitViolation

        severity_for = {
            RateLimitViolation.BURST: Severity.HIGH,
            RateLimitViolation.REPEATED_COMMAND: Severity.MEDIUM,
            RateLimitViolation.HIGH_VOLUME: Severity.MEDIUM,
            RateLimitViolation.DANGEROUS_SPIKE: Severity.HIGH,
            RateLimitViolation.VELOCITY_ANOMALY: Severity.MEDIUM,
            RateLimitViolation.CIRCUIT_OPEN: Severity.CRITICAL,
        }

        findings = [
            Finding(
                pattern_name=f"rate_limit_{violation.value}",
                matched_text=f"{tool_name}: {content[:50]}...",
                severity=severity_for.get(violation, Severity.MEDIUM),
                description=f"Rate limit violation: {violation.value}",
                recommended_action=ActionType.ASK,
            )
            for violation in result.violations
        ]

        return ScreeningResult(
            allowed=False,
            plugin_name=self.name,
            reason=result.message,
            risk_level="dangerous" if result.is_circuit_open else "suspicious",
            should_prompt=True,
            details=result.details,
            findings=findings,
        )

    def get_session_stats(self, session_id: str) -> Dict[str, Any]:
        """Get statistics for a session."""
        limiter = self._get_rate_limiter()
        return limiter.get_session_stats(session_id) if limiter else {}

    def reset_circuit(self, session_id: str) -> None:
        """Reset circuit breaker for a session."""
        limiter = self._get_rate_limiter()
        if limiter is not None:
            limiter.reset_circuit(session_id)