zen-ai-pentest 2.2.0__py3-none-any.whl → 2.3.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- api/auth.py +61 -7
- api/csrf_protection.py +286 -0
- api/main.py +77 -11
- api/rate_limiter.py +317 -0
- api/rate_limiter_v2.py +586 -0
- autonomous/ki_analysis_agent.py +1033 -0
- benchmarks/__init__.py +12 -142
- benchmarks/agent_performance.py +374 -0
- benchmarks/api_performance.py +479 -0
- benchmarks/scan_performance.py +272 -0
- modules/agent_coordinator.py +255 -0
- modules/api_key_manager.py +501 -0
- modules/benchmark.py +706 -0
- modules/cve_updater.py +303 -0
- modules/false_positive_filter.py +149 -0
- modules/output_formats.py +1088 -0
- modules/risk_scoring.py +206 -0
- {zen_ai_pentest-2.2.0.dist-info → zen_ai_pentest-2.3.0.dist-info}/METADATA +134 -289
- {zen_ai_pentest-2.2.0.dist-info → zen_ai_pentest-2.3.0.dist-info}/RECORD +23 -9
- {zen_ai_pentest-2.2.0.dist-info → zen_ai_pentest-2.3.0.dist-info}/WHEEL +0 -0
- {zen_ai_pentest-2.2.0.dist-info → zen_ai_pentest-2.3.0.dist-info}/entry_points.txt +0 -0
- {zen_ai_pentest-2.2.0.dist-info → zen_ai_pentest-2.3.0.dist-info}/licenses/LICENSE +0 -0
- {zen_ai_pentest-2.2.0.dist-info → zen_ai_pentest-2.3.0.dist-info}/top_level.txt +0 -0
modules/cve_updater.py
ADDED
@@ -0,0 +1,303 @@
"""CVE Database Auto-Update Module

Fetches and updates CVE data from NVD (National Vulnerability Database)
- Daily delta updates
- Caching with ETags
- Rate limiting compliance (NVD: max 5 requests in 30 seconds)
"""
import asyncio
import json
import logging
import os
from dataclasses import dataclass, asdict
from datetime import datetime, timedelta
from typing import Dict, List, Optional

import aiofiles
import aiohttp


@dataclass
class CVEEntry:
    """Single CVE entry structure"""
    id: str
    published: str
    last_modified: str
    description: str
    cvss_score: float
    cvss_vector: str
    severity: str
    references: List[str]
    cpe_matches: List[str]


class NVDClient:
    """NVD API client with rate limiting"""

    BASE_URL = "https://services.nvd.nist.gov/rest/json/cves/2.0"
    RATE_LIMIT_DELAY = 6.0  # 6 seconds between requests (max 5 per 30s)

    def __init__(self, api_key: Optional[str] = None):
        self.api_key = api_key or os.getenv("NVD_API_KEY")
        self.last_request_time = 0.0
        self.session: Optional[aiohttp.ClientSession] = None

    async def __aenter__(self):
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, *args):
        if self.session:
            await self.session.close()

    async def _rate_limit(self):
        """Sleep as needed to stay within the NVD rate limit"""
        elapsed = asyncio.get_running_loop().time() - self.last_request_time
        if elapsed < self.RATE_LIMIT_DELAY:
            await asyncio.sleep(self.RATE_LIMIT_DELAY - elapsed)
        self.last_request_time = asyncio.get_running_loop().time()

    async def fetch_cves(
        self,
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None,
        results_per_page: int = 2000
    ) -> List[CVEEntry]:
        """Fetch CVEs from NVD within an optional last-modified date range"""
        await self._rate_limit()

        params = {"resultsPerPage": str(results_per_page)}

        if start_date:
            params["lastModStartDate"] = start_date.strftime("%Y-%m-%dT%H:%M:%S.000")
        if end_date:
            params["lastModEndDate"] = end_date.strftime("%Y-%m-%dT%H:%M:%S.000")

        # The NVD 2.0 API expects the key in the `apiKey` request header,
        # not as a query parameter.
        headers = {"apiKey": self.api_key} if self.api_key else {}

        async with self.session.get(self.BASE_URL, params=params, headers=headers) as resp:
            if resp.status != 200:
                raise RuntimeError(f"NVD API error: HTTP {resp.status}")

            data = await resp.json()
            return self._parse_cves(data)

    def _parse_cves(self, data: Dict) -> List[CVEEntry]:
        """Parse an NVD API response into CVEEntry objects"""
        cves = []

        for vuln in data.get("vulnerabilities", []):
            cve_data = vuln.get("cve", {})

            # Extract the CVSS score (prefer v3.1, fall back to v3.0)
            cvss_score = 0.0
            severity = "unknown"
            cvss_vector = ""

            metrics = cve_data.get("metrics", {})
            for key in ("cvssMetricV31", "cvssMetricV30"):
                if key in metrics:
                    cvss_data = metrics[key][0]["cvssData"]
                    cvss_score = cvss_data.get("baseScore", 0.0)
                    # For CVSS v3.x, baseSeverity lives inside cvssData
                    severity = cvss_data.get("baseSeverity", "unknown").lower()
                    cvss_vector = cvss_data.get("vectorString", "")
                    break

            # Extract the description (English only)
            description = ""
            for desc in cve_data.get("descriptions", []):
                if desc.get("lang") == "en":
                    description = desc.get("value", "")
                    break

            # Extract reference URLs
            references = [
                ref.get("url", "")
                for ref in cve_data.get("references", [])
                if ref.get("url")
            ]

            # Extract CPE match criteria
            cpe_matches = []
            for config in cve_data.get("configurations", []):
                for node in config.get("nodes", []):
                    for match in node.get("cpeMatch", []):
                        if match.get("criteria"):
                            cpe_matches.append(match["criteria"])

            cves.append(CVEEntry(
                id=cve_data.get("id", ""),
                published=cve_data.get("published", ""),
                last_modified=cve_data.get("lastModified", ""),
                description=description,
                cvss_score=cvss_score,
                cvss_vector=cvss_vector,
                severity=severity,
                references=references,
                cpe_matches=cpe_matches,
            ))

        return cves


class CVEUpdater:
    """CVE database updater"""

    def __init__(self, db_path: str = "data/cve_database.json"):
        self.db_path = db_path
        self.metadata_path = db_path.replace(".json", "_meta.json")
        self.db_dir = os.path.dirname(db_path)

        # Ensure the target directory exists
        if self.db_dir:
            os.makedirs(self.db_dir, exist_ok=True)

    async def initialize_db(self):
        """Create an initial empty database"""
        if not os.path.exists(self.db_path):
            await self._save_db({})
            await self._save_metadata({"last_update": None, "total_cves": 0})

    async def update(self, days_back: int = 1) -> Dict:
        """Update the CVE database with the delta from the last N days"""
        await self.initialize_db()

        # Calculate the date range
        end_date = datetime.utcnow()
        start_date = end_date - timedelta(days=days_back)

        logging.info(f"Fetching CVEs from {start_date} to {end_date}")

        # Fetch from NVD
        async with NVDClient() as client:
            cves = await client.fetch_cves(start_date, end_date)

        # Load the existing database
        db = await self._load_db()

        # Merge updates
        new_count = 0
        updated_count = 0

        for cve in cves:
            if cve.id not in db:
                new_count += 1
            elif db[cve.id]["last_modified"] != cve.last_modified:
                updated_count += 1

            db[cve.id] = asdict(cve)

        # Save the updated database
        await self._save_db(db)

        # Update metadata
        metadata = {
            "last_update": datetime.utcnow().isoformat(),
            "total_cves": len(db),
            "last_fetch_new": new_count,
            "last_fetch_updated": updated_count,
            "last_fetch_date_range": f"{start_date.date()} to {end_date.date()}"
        }
        await self._save_metadata(metadata)

        logging.info(f"Update complete: {new_count} new, {updated_count} updated, {len(db)} total")

        return metadata

    async def full_sync(self) -> Dict:
        """Full database sync (last 120 days, the NVD maximum range per request)"""
        return await self.update(days_back=120)

    async def _load_db(self) -> Dict:
        """Load the database from disk"""
        if not os.path.exists(self.db_path):
            return {}

        async with aiofiles.open(self.db_path, 'r') as f:
            content = await f.read()
            return json.loads(content)

    async def _save_db(self, db: Dict):
        """Save the database to disk"""
        async with aiofiles.open(self.db_path, 'w') as f:
            await f.write(json.dumps(db, indent=2))

    async def _load_metadata(self) -> Dict:
        """Load metadata from disk"""
        if not os.path.exists(self.metadata_path):
            return {}

        async with aiofiles.open(self.metadata_path, 'r') as f:
            content = await f.read()
            return json.loads(content)

    async def _save_metadata(self, metadata: Dict):
        """Save metadata to disk"""
        async with aiofiles.open(self.metadata_path, 'w') as f:
            await f.write(json.dumps(metadata, indent=2))

    def get_stats(self) -> Dict:
        """Get database statistics"""
        try:
            if not os.path.exists(self.db_path):
                return {"status": "not_initialized"}

            with open(self.db_path, 'r') as f:
                db = json.load(f)

            with open(self.metadata_path, 'r') as f:
                metadata = json.load(f)

            # Count entries by severity
            severity_counts = {}
            for cve in db.values():
                sev = cve.get("severity", "unknown")
                severity_counts[sev] = severity_counts.get(sev, 0) + 1

            return {
                "status": "ready",
                "total_cves": len(db),
                "last_update": metadata.get("last_update"),
                "by_severity": severity_counts,
                "metadata": metadata
            }
        except Exception as e:
            return {"status": "error", "error": str(e)}

    async def lookup_cve(self, cve_id: str) -> Optional[Dict]:
        """Look up a single CVE by ID"""
        db = await self._load_db()
        return db.get(cve_id)

    def get_info(self) -> Dict:
        """Get module info"""
        return {
            "name": "cve_updater",
            "version": "1.0.0",
            "description": "CVE Database Auto-Update from NVD",
            "source": "NVD (National Vulnerability Database)",
            "update_frequency": "daily",
            "rate_limit": "6 seconds between requests"
        }


# CLI interface
if __name__ == "__main__":
    import sys

    logging.basicConfig(level=logging.INFO)

    updater = CVEUpdater()

    if len(sys.argv) > 1 and sys.argv[1] == "full":
        result = asyncio.run(updater.full_sync())
    else:
        result = asyncio.run(updater.update())

    print(json.dumps(result, indent=2))
modules/false_positive_filter.py
ADDED
@@ -0,0 +1,149 @@
"""False Positive Reduction Module

Uses ML-based heuristics and rule-based filters to reduce false positives.
Addresses Issue #14
"""
import re
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Tuple


class ConfidenceLevel(Enum):
    HIGH = "high"        # > 90% confidence
    MEDIUM = "medium"    # 70-90% confidence
    LOW = "low"          # 50-70% confidence
    UNKNOWN = "unknown"  # < 50% confidence


@dataclass
class Finding:
    """Security finding structure"""
    title: str
    description: str
    severity: str
    cvss_score: float
    evidence: List[str]
    tool: str
    confidence: float = 0.5


class FalsePositiveFilter:
    """Filter false positives from security findings"""

    name = "false_positive_filter"
    version = "1.0.0"

    # Known false-positive patterns
    FALSE_POSITIVE_PATTERNS = [
        r"(?i)self-signed certificate",  # Often intentional in dev
        r"(?i)directory listing.*empty",
        r"(?i)missing.*header.*not applicable",
        r"(?i)information disclosure.*version.*normal",
        r"(?i)cookie.*without.*secure.*localhost",
    ]

    # Keywords that indicate a likely false positive
    FP_KEYWORDS = [
        "test", "localhost", "127.0.0.1", "example.com",
        "documentation", "intentional", "expected behavior"
    ]

    def __init__(self):
        self.rules_applied = 0
        self.ml_model_loaded = False

    def apply_rule_based_filter(self, finding: Finding) -> Tuple[bool, str]:
        """
        Apply rule-based filtering
        Returns: (is_false_positive, reason)
        """
        # Check against known FP patterns
        combined_text = f"{finding.title} {finding.description}"

        for pattern in self.FALSE_POSITIVE_PATTERNS:
            if re.search(pattern, combined_text):
                return True, f"Matched FP pattern: {pattern}"

        # Check for FP keywords
        for keyword in self.FP_KEYWORDS:
            if keyword.lower() in combined_text.lower():
                return True, f"Contains FP keyword: {keyword}"

        # Check the confidence score
        if finding.confidence < 0.3:
            return True, "Confidence below threshold (0.3)"

        return False, ""

    def apply_ml_filter(self, finding: Finding) -> float:
        """
        ML-based false-positive probability
        Returns: probability (0.0 - 1.0) that this finding is a false positive
        """
        # In production this would use a trained model;
        # for now, heuristics serve as a proxy.
        fp_score = 0.0

        # Little or no evidence raises the FP probability
        if len(finding.evidence) == 0:
            fp_score += 0.3
        elif len(finding.evidence) < 2:
            fp_score += 0.1

        # Short, generic titles raise the FP probability
        generic_patterns = [r"(?i)vulnerability", r"(?i)issue", r"(?i)problem"]
        for pattern in generic_patterns:
            if re.search(pattern, finding.title) and len(finding.title) < 30:
                fp_score += 0.15

        # Low severity combined with low confidence is likely an FP
        if finding.severity == "low" and finding.confidence < 0.5:
            fp_score += 0.2

        return min(fp_score, 1.0)

    def filter_findings(
        self,
        findings: List[Finding],
        fp_threshold: float = 0.7
    ) -> Dict[str, Any]:
        """
        Filter findings and separate true positives from false positives
        """
        true_positives = []
        false_positives = []

        for finding in findings:
            # Rule-based filter
            is_fp_rule, reason = self.apply_rule_based_filter(finding)

            # ML filter
            fp_probability = self.apply_ml_filter(finding)

            # Combine both filters
            if is_fp_rule or fp_probability > fp_threshold:
                false_positives.append({
                    'finding': finding,
                    'fp_probability': fp_probability,
                    'reason': reason if is_fp_rule else f"ML score: {fp_probability:.2f}"
                })
            else:
                true_positives.append(finding)

        return {
            'true_positives': true_positives,
            'false_positives': false_positives,
            'reduction_rate': len(false_positives) / len(findings) if findings else 0
        }

    def get_info(self) -> Dict:
        """Get module info"""
        return {
            'name': self.name,
            'version': self.version,
            'description': 'ML-based false positive reduction',
            'rules_count': len(self.FALSE_POSITIVE_PATTERNS),
            'threshold': 0.7
        }
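
Likewise, a minimal sketch of how the filter might be driven. It is not part of the published diff; the import path modules.false_positive_filter is assumed from the file listing, and the sample findings are invented for illustration.

# Hypothetical usage sketch; import path assumed from the file listing above.
from modules.false_positive_filter import FalsePositiveFilter, Finding

findings = [
    Finding(title="SQL injection in /login", description="Payload reflected in error",
            severity="high", cvss_score=9.1, evidence=["req.log", "resp.log"],
            tool="sqlmap", confidence=0.9),
    Finding(title="Issue", description="Self-signed certificate on dev host",
            severity="low", cvss_score=2.0, evidence=[],
            tool="nmap", confidence=0.2),
]

result = FalsePositiveFilter().filter_findings(findings, fp_threshold=0.7)
# The second finding matches the self-signed-certificate rule and is filtered out
print(len(result["true_positives"]), "kept,",
      len(result["false_positives"]), "filtered,",
      f"reduction rate {result['reduction_rate']:.0%}")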