lanscape 1.4.4__py3-none-any.whl → 2.0.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lanscape/__init__.py +9 -4
- lanscape/__main__.py +1 -0
- lanscape/{libraries → core}/app_scope.py +22 -3
- lanscape/{libraries → core}/decorators.py +88 -52
- lanscape/{libraries → core}/device_alive.py +4 -3
- lanscape/{libraries → core}/errors.py +1 -0
- lanscape/{libraries → core}/ip_parser.py +2 -1
- lanscape/{libraries → core}/logger.py +1 -0
- lanscape/{libraries → core}/mac_lookup.py +1 -0
- lanscape/{libraries → core}/net_tools.py +140 -46
- lanscape/{libraries → core}/port_manager.py +1 -0
- lanscape/{libraries → core}/runtime_args.py +1 -0
- lanscape/{libraries → core}/scan_config.py +104 -5
- lanscape/core/service_scan.py +205 -0
- lanscape/{libraries → core}/subnet_scan.py +19 -11
- lanscape/{libraries → core}/version_manager.py +3 -2
- lanscape/{libraries → core}/web_browser.py +1 -0
- lanscape/resources/mac_addresses/convert_csv.py +1 -0
- lanscape/resources/ports/convert_csv.py +1 -0
- lanscape/resources/services/definitions.jsonc +576 -400
- lanscape/ui/app.py +5 -4
- lanscape/ui/blueprints/__init__.py +2 -1
- lanscape/ui/blueprints/api/__init__.py +1 -0
- lanscape/ui/blueprints/api/port.py +2 -1
- lanscape/ui/blueprints/api/scan.py +2 -1
- lanscape/ui/blueprints/api/tools.py +5 -4
- lanscape/ui/blueprints/web/__init__.py +1 -0
- lanscape/ui/blueprints/web/routes.py +30 -2
- lanscape/ui/main.py +5 -4
- lanscape/ui/shutdown_handler.py +2 -1
- lanscape/ui/static/css/style.css +145 -2
- lanscape/ui/static/js/main.js +30 -2
- lanscape/ui/static/js/scan-config.js +39 -0
- lanscape/ui/templates/scan/config.html +43 -0
- lanscape/ui/templates/scan/device-detail.html +111 -0
- lanscape/ui/templates/scan/ip-table-row.html +12 -78
- lanscape/ui/templates/scan/ip-table.html +1 -1
- {lanscape-1.4.4.dist-info → lanscape-2.0.0a1.dist-info}/METADATA +7 -2
- lanscape-2.0.0a1.dist-info/RECORD +76 -0
- lanscape-2.0.0a1.dist-info/entry_points.txt +2 -0
- lanscape/libraries/service_scan.py +0 -50
- lanscape-1.4.4.dist-info/RECORD +0 -74
- /lanscape/{libraries → core}/__init__.py +0 -0
- {lanscape-1.4.4.dist-info → lanscape-2.0.0a1.dist-info}/WHEEL +0 -0
- {lanscape-1.4.4.dist-info → lanscape-2.0.0a1.dist-info}/licenses/LICENSE +0 -0
- {lanscape-1.4.4.dist-info → lanscape-2.0.0a1.dist-info}/top_level.txt +0 -0
lanscape/__init__.py
CHANGED

@@ -1,20 +1,25 @@
 """
 Local network scanner
 """
-from lanscape.libraries.subnet_scan import (
+from lanscape.core.subnet_scan import (
     SubnetScanner,
+    ScannerResults,
     ScanManager
 )
 
-from lanscape.libraries.scan_config import (
+from lanscape.core.scan_config import (
     ScanConfig,
     ArpConfig,
     PingConfig,
     PokeConfig,
     ArpCacheConfig,
+    PortScanConfig,
+    ServiceScanConfig,
+    ServiceScanStrategy,
     ScanType
 )
 
-from lanscape.libraries.port_manager import PortManager
+from lanscape.core.port_manager import PortManager
+
+from lanscape.core import net_tools
 
-from lanscape.libraries import net_tools
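
This hunk renames the public import namespace from `lanscape.libraries` to `lanscape.core` and re-exports four additional names (`ScannerResults`, `PortScanConfig`, `ServiceScanConfig`, `ServiceScanStrategy`). A migration sketch for downstream code, assuming the 2.0.0a1 wheel is installed; the 1.4.4 paths in the comments are inferred from the `libraries → core` renames in the file list above:

```python
# 1.4.4 imported from the lanscape.libraries namespace, e.g.:
#   from lanscape.libraries.subnet_scan import SubnetScanner
# 2.0.0a1 moves the same modules under lanscape.core:
from lanscape.core.subnet_scan import SubnetScanner, ScannerResults, ScanManager
from lanscape.core.scan_config import ScanConfig, PortScanConfig, ServiceScanConfig

# The package root re-exports the same names, so this works as well:
from lanscape import SubnetScanner, ScanConfig, ServiceScanStrategy
```
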
lanscape/__main__.py
CHANGED

lanscape/{libraries → core}/app_scope.py
CHANGED

@@ -5,7 +5,6 @@ Resource and environment management utilities for Lanscape.
 
 from pathlib import Path
 import json
-import re
 
 
 class ResourceManager:

@@ -32,9 +31,28 @@ class ResourceManager:
         return json.loads(self.get(asset_name))
 
     def get_jsonc(self, asset_name: str):
-        """
+        """AI Slop to get JSONC (JSON with comments) content of an asset as a JSON object."""
         content = self.get(asset_name)
-
+
+        def strip_jsonc_lines(text):
+            result = []
+            in_string = False
+            escape = False
+            for line in text.splitlines():
+                new_line = []
+                i = 0
+                while i < len(line):
+                    char = line[i]
+                    if char == '"' and not escape:
+                        in_string = not in_string
+                    if not in_string and line[i:i + 2] == "//":
+                        break  # Ignore rest of line (comment)
+                    new_line.append(char)
+                    escape = (char == '\\' and not escape)
+                    i += 1
+                result.append(''.join(new_line))
+            return '\n'.join(result)
+        cleaned_content = strip_jsonc_lines(content)
         return json.loads(cleaned_content)
 
     def update(self, asset_name: str, content: str):

@@ -72,3 +90,4 @@ def is_local_run() -> bool:
     if any(parts):
         return False
     return True  # Installed package
+
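
The new `get_jsonc` replaces the previous implementation (the removal of `import re` suggests it relied on a regex) with a character-level scanner that strips `//` comments only when they fall outside string literals, likely so that `resources/services/definitions.jsonc` can be parsed by the standard `json` module. A standalone sketch of the same approach, with a small usage example:

```python
import json


def strip_jsonc_lines(text: str) -> str:
    """Drop // comments that fall outside string literals (sketch of the helper above)."""
    result = []
    in_string = False
    escape = False
    for line in text.splitlines():
        new_line = []
        i = 0
        while i < len(line):
            char = line[i]
            if char == '"' and not escape:
                in_string = not in_string
            if not in_string and line[i:i + 2] == "//":
                break  # rest of the line is a comment
            new_line.append(char)
            escape = (char == '\\' and not escape)
            i += 1
        result.append(''.join(new_line))
    return '\n'.join(result)


sample = """{
  "ssh": 22,            // comment: stripped
  "url": "http://host"  // the // inside the string is kept
}"""
print(json.loads(strip_jsonc_lines(sample)))
# {'ssh': 22, 'url': 'http://host'}
```
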

lanscape/{libraries → core}/decorators.py
CHANGED

@@ -2,13 +2,11 @@
 """Decorators and job tracking utilities for Lanscape."""
 
 from time import time
-from dataclasses import dataclass, field
-from typing import DefaultDict
 from collections import defaultdict
-import inspect
 import functools
 import concurrent.futures
 import logging
+import threading
 from tabulate import tabulate
 
 

@@ -39,31 +37,74 @@ def run_once(func):
     return wrapper
 
 
-@dataclass
 class JobStats:
     """
+    Thread-safe singleton for tracking job statistics across all classes.
     Tracks statistics for job execution, including running, finished, and timing data.
     """
-    running: DefaultDict[str, int] = field(
-        default_factory=lambda: defaultdict(int))
-    finished: DefaultDict[str, int] = field(
-        default_factory=lambda: defaultdict(int))
-    timing: DefaultDict[str, float] = field(
-        default_factory=lambda: defaultdict(float))
 
     _instance = None
+    _lock = threading.Lock()
+
+    def __new__(cls):
+        if cls._instance is None:
+            with cls._lock:
+                if cls._instance is None:  # Double-checked locking
+                    cls._instance = super().__new__(cls)
+        return cls._instance
 
     def __init__(self):
-
-
+        if not hasattr(self, '_initialized'):
+            self._stats_lock = threading.RLock()
             self.running = defaultdict(int)
             self.finished = defaultdict(int)
             self.timing = defaultdict(float)
-
-
-
-
-
+            self._initialized = True
+
+    def start_job(self, func_name: str):
+        """Thread-safe increment of running counter."""
+        with self._stats_lock:
+            self.running[func_name] += 1
+
+    def finish_job(self, func_name: str, elapsed_time: float):
+        """Thread-safe update of job completion and timing."""
+        with self._stats_lock:
+            self.running[func_name] -= 1
+            self.finished[func_name] += 1
+
+            # Calculate running average
+            count = self.finished[func_name]
+            old_avg = self.timing[func_name]
+            new_avg = (old_avg * (count - 1) + elapsed_time) / count
+            self.timing[func_name] = round(new_avg, 4)
+
+            # Cleanup running if zero
+            if self.running[func_name] <= 0:
+                self.running.pop(func_name, None)
+
+    def clear_stats(self):
+        """Clear all statistics (useful between scans)."""
+        with self._stats_lock:
+            self.running.clear()
+            self.finished.clear()
+            self.timing.clear()
+
+    def get_stats_copy(self) -> dict:
+        """Get a thread-safe copy of current statistics."""
+        with self._stats_lock:
+            return {
+                'running': dict(self.running),
+                'finished': dict(self.finished),
+                'timing': dict(self.timing)
+            }
+
+    @classmethod
+    def reset_for_testing(cls):
+        """Reset singleton instance for testing purposes only."""
+        with cls._lock:
+            if cls._instance:
+                cls._instance.clear_stats()
+            cls._instance = None
 
     def __str__(self):
         """Return a formatted string representation of the job statistics."""
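
`JobStats` moves from a `@dataclass` of `defaultdict`s to an explicit double-checked-locking singleton whose counters are only touched while holding an `RLock`. A usage sketch, assuming the class is importable from the renamed `lanscape.core.decorators` module:

```python
from lanscape.core.decorators import JobStats

stats = JobStats()
assert stats is JobStats()            # __new__ always hands back the same instance

stats.start_job('Device.test_port')
stats.finish_job('Device.test_port', elapsed_time=0.25)

print(stats.get_stats_copy())
# e.g. {'running': {}, 'finished': {'Device.test_port': 1}, 'timing': {'Device.test_port': 0.25}}

JobStats.reset_for_testing()          # drop the singleton (intended for tests)
```
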

@@ -106,48 +147,40 @@ def job_tracker(func):
         Return the function name with the class name prepended if available.
         """
         qual_parts = func.__qualname__.split(".")
-
-
-        if
-
-
-
-        if
-
+
+        # If function has class context (e.g., "ClassName.method_name")
+        if len(qual_parts) > 1:
+            cls_name = qual_parts[-2]
+
+            # Check if first_arg is an instance and has the expected class name
+            if first_arg is not None and hasattr(first_arg, '__class__'):
+                if first_arg.__class__.__name__ == cls_name:
+                    return f"{cls_name}.{func.__name__}"
+
         return func.__name__
 
+    @functools.wraps(func)
     def wrapper(*args, **kwargs):
         """Wrap the function to update job statistics before and after execution."""
-        class_instance = args[0]
         job_stats = JobStats()
-        fxn = get_fxn_src_name(
-            func,
-            class_instance
-        )
-
-        # Increment running counter and track execution time
-        job_stats.running[fxn] += 1
-        start = time()
 
-
+        # Determine function name for tracking
+        if args:
+            fxn = get_fxn_src_name(func, args[0])
+        else:
+            fxn = func.__name__
 
-        #
-
-
-        job_stats.finished[fxn] += 1
-
-        # Calculate the new average timing for the function
-        job_stats.timing[fxn] = round(
-            ((job_stats.finished[fxn] - 1) * job_stats.timing[fxn] + elapsed)
-            / job_stats.finished[fxn],
-            4
-        )
-
-        # Clean up if no more running instances of this function
-        if job_stats.running[fxn] == 0:
-            job_stats.running.pop(fxn)
+        # Start job tracking
+        job_stats.start_job(fxn)
+        start = time()
 
-
+        try:
+            result = func(*args, **kwargs)  # Execute the wrapped function
+            return result
+        finally:
+            # Always update statistics, even if function raises exception
+            elapsed = time() - start
+            job_stats.finish_job(fxn, elapsed)
 
     return wrapper
 
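
The rewritten `job_tracker` no longer assumes the wrapped callable is a bound method (the old `args[0]` access), and the `try/finally` records timing even when the call raises. A small sketch, again assuming the renamed import path:

```python
from lanscape.core.decorators import job_tracker, JobStats


@job_tracker
def resolve(host: str) -> str:
    return host.upper()


@job_tracker
def flaky():
    raise RuntimeError("boom")


resolve("router.lan")
try:
    flaky()
except RuntimeError:
    pass                              # the finally block still records the failed call

print(JobStats().get_stats_copy()['finished'])
# e.g. {'resolve': 1, 'flaky': 1}
```
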

@@ -179,7 +212,9 @@ def timeout_enforcer(timeout: int, raise_on_timeout: bool = True):
         @functools.wraps(func)
         def wrapper(*args, **kwargs):
             """Wrap the function to enforce a timeout on its execution."""
-            with concurrent.futures.ThreadPoolExecutor(
+            with concurrent.futures.ThreadPoolExecutor(
+                    max_workers=1,
+                    thread_name_prefix="TimeoutEnforcer") as executor:
                 future = executor.submit(func, *args, **kwargs)
                 try:
                     return future.result(

@@ -194,3 +229,4 @@ def timeout_enforcer(timeout: int, raise_on_timeout: bool = True):
                     return None  # Return None if not raising an exception
         return wrapper
     return decorator
+
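
`timeout_enforcer` keeps its executor-based design but now pins the pool to a single worker with a recognizable thread-name prefix. A usage sketch of the decorator as shown; note that with this pattern the worker thread still runs to completion before the executor's context manager exits, even after the timeout fires:

```python
from time import sleep
from lanscape.core.decorators import timeout_enforcer


@timeout_enforcer(timeout=1, raise_on_timeout=False)
def slow_banner_grab() -> str:
    sleep(5)                          # simulate a host that never answers
    return "banner"


print(slow_banner_grab())             # None -- the 1s budget was exceeded, no exception raised
```
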

lanscape/{libraries → core}/device_alive.py
CHANGED

@@ -13,12 +13,12 @@ from scapy.sendrecv import srp
 from scapy.layers.l2 import ARP, Ether
 from icmplib import ping
 
-from lanscape.libraries.net_tools import Device
-from lanscape.libraries.scan_config import (
+from lanscape.core.net_tools import Device
+from lanscape.core.scan_config import (
     ScanConfig, ScanType, PingConfig,
     ArpConfig, PokeConfig, ArpCacheConfig
 )
-from lanscape.libraries.decorators import timeout_enforcer, job_tracker
+from lanscape.core.decorators import timeout_enforcer, job_tracker
 
 
 def is_device_alive(device: Device, scan_config: ScanConfig) -> bool:

@@ -227,3 +227,4 @@ class Poker():
             sock.close()
 
         do_poke()
+

lanscape/{libraries → core}/ip_parser.py
CHANGED

@@ -12,7 +12,7 @@ It also includes validation to prevent processing excessively large IP ranges.
 import ipaddress
 import re
 
-from lanscape.libraries.errors import SubnetTooLargeError
+from lanscape.core.errors import SubnetTooLargeError
 
 MAX_IPS_ALLOWED = 100000
 

@@ -139,3 +139,4 @@ def ip_range_to_list(start_ip, end_ip):
     # Yield the range of IPs
     for ip_int in range(int(start_ip), int(end_ip) + 1):
         yield ipaddress.IPv4Address(ip_int)
+

lanscape/{libraries → core}/net_tools.py
CHANGED

@@ -1,44 +1,97 @@
 """Network tools for scanning and managing devices on a network."""
 
-import logging
 import ipaddress
-import
-import
-from typing import List, Dict
+import logging
+import re
 import socket
 import struct
-import
-import
+import subprocess
+import traceback
+from time import sleep
+from typing import List, Dict, Optional
 
+import psutil
 from scapy.sendrecv import srp
 from scapy.layers.l2 import ARP, Ether
 from scapy.error import Scapy_Exception
 
-from
-
-from
-
-
+from pydantic import BaseModel, PrivateAttr
+try:
+    from pydantic import ConfigDict, computed_field, model_serializer  # pydantic v2
+    _PYD_V2 = True
+except Exception:  # pragma: no cover
+    CONFIG_DICT = None  # type: ignore # pylint: disable=invalid-name
+    COMPUTED_FIELD = None  # type: ignore # pylint: disable=invalid-name
+    MODEL_SERIALIZER = None  # type: ignore # pylint: disable=invalid-name
+    _PYD_V2 = False
+else:
+    CONFIG_DICT = ConfigDict  # pylint: disable=invalid-name
+    COMPUTED_FIELD = computed_field  # pylint: disable=invalid-name
+    MODEL_SERIALIZER = model_serializer  # pylint: disable=invalid-name
+
+from lanscape.core.service_scan import scan_service
+from lanscape.core.mac_lookup import MacLookup, get_macs
+from lanscape.core.ip_parser import get_address_count, MAX_IPS_ALLOWED
+from lanscape.core.errors import DeviceError
+from lanscape.core.decorators import job_tracker, run_once, timeout_enforcer
+from lanscape.core.scan_config import ServiceScanConfig, PortScanConfig
 
 log = logging.getLogger('NetTools')
 mac_lookup = MacLookup()
 
 
-class Device:
+class Device(BaseModel):
     """Represents a network device with metadata and scanning capabilities."""
 
-
-
-
-
-
-
-
-
-
-
-
-
+    ip: str
+    alive: Optional[bool] = None
+    hostname: Optional[str] = None
+    macs: List[str] = []
+    manufacturer: Optional[str] = None
+    ports: List[int] = []
+    stage: str = 'found'
+    ports_scanned: int = 0
+    services: Dict[str, List[int]] = {}
+    caught_errors: List[DeviceError] = []
+    job_stats: Optional[Dict] = None
+
+    _log: logging.Logger = PrivateAttr(default_factory=lambda: logging.getLogger('Device'))
+    # Support pydantic v1 and v2 configs
+    if _PYD_V2 and CONFIG_DICT:
+        model_config = CONFIG_DICT(arbitrary_types_allowed=True)  # type: ignore[assignment]
+    else:  # pragma: no cover
+        class Config:  # pylint: disable=too-few-public-methods
+            """Pydantic v1 configuration."""
+            arbitrary_types_allowed = True
+            extra = 'allow'
+
+    @property
+    def log(self) -> logging.Logger:
+        """Get the logger instance for this device."""
+        return self._log
+
+    # Computed fields for pydantic v2 (included in model_dump)
+    if _PYD_V2 and COMPUTED_FIELD:
+        @COMPUTED_FIELD(return_type=str)  # type: ignore[misc]
+        @property
+        def mac_addr(self) -> str:
+            """Get the primary MAC address for this device."""
+            return self.get_mac() or ""
+
+        @MODEL_SERIALIZER(mode='wrap')  # type: ignore[misc]
+        def _serialize(self, serializer):
+            """Serialize device data for output."""
+            data = serializer(self)
+            # Remove internals
+            data.pop('job_stats', None)
+            # Ensure mac_addr present (computed_field already adds it)
+            data['mac_addr'] = data.get('mac_addr') or (self.get_mac() or '')
+            # Ensure manufacturer present; prefer explicit model value
+            manuf = data.get('manufacturer')
+            if not manuf:
+                data['manufacturer'] = self._get_manufacturer(
+                    data['mac_addr']) if data['mac_addr'] else None
+            return data
 
     def get_metadata(self):
         """Retrieve metadata such as hostname and MAC addresses."""
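
`Device` becomes a pydantic model, with a shim so the same class loads under pydantic v1 (class-based `Config`) and v2 (`ConfigDict`, `computed_field`, `model_serializer`). A hypothetical construction sketch; only fields visible in this hunk are used, and in a real scan they are populated by the scanner rather than by hand:

```python
from lanscape.core.net_tools import Device

dev = Device(
    ip="192.168.1.50",
    hostname="printer.lan",
    macs=["aa:bb:cc:dd:ee:ff"],
    ports=[80, 443, 9100],
)

data = dev.dict()                     # routed to model_dump() on pydantic v2
print(data["ip"], data["ports"])
print("mac_addr" in data)             # True  -- injected by the serializer / dict() fallback
print("job_stats" in data)            # False -- stripped from the serialized output
```
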

@@ -46,32 +99,72 @@ class Device:
         self.hostname = self._get_hostname()
         self._get_mac_addresses()
 
-
-
-
-
-
-
-
-
-
-
-
-
+    # Fallback for pydantic v1: use dict() and enrich output
+    if not _PYD_V2:
+        def dict(self, *args, **kwargs) -> dict:  # type: ignore[override]
+            """Generate dictionary representation for pydantic v1."""
+            data = super().dict(*args, **kwargs)
+            data.pop('job_stats', None)
+            mac_addr = self.get_mac() or ''
+            data['mac_addr'] = mac_addr
+            if not data.get('manufacturer'):
+                data['manufacturer'] = self._get_manufacturer(mac_addr) if mac_addr else None
+            return data
+    else:
+        # In v2, route dict() to model_dump() so callers get the serialized enrichment
+        def dict(self, *args, **kwargs) -> dict:  # type: ignore[override]
+            """Generate dictionary representation for pydantic v2."""
+            try:
+                return self.model_dump(*args, **kwargs)  # type: ignore[attr-defined]
+            except Exception:
+                # Safety fallback (shouldn't normally hit)
+                data = self.__dict__.copy()
+                data.pop('_log', None)
+                data.pop('job_stats', None)
+                mac_addr = self.get_mac() or ''
+                data['mac_addr'] = mac_addr
+                if not data.get('manufacturer'):
+                    data['manufacturer'] = self._get_manufacturer(mac_addr) if mac_addr else None
+                return data
+
+    def test_port(self, port: int, port_config: Optional[PortScanConfig] = None) -> bool:
         """Test if a specific port is open on the device."""
-
-
-
-
-
-
-
-
+        if port_config is None:
+            port_config = PortScanConfig()  # Use defaults
+
+        # Calculate timeout enforcer: (timeout * (retries+1) * 1.5)
+        enforcer_timeout = port_config.timeout * (port_config.retries + 1) * 1.5
+
+        @timeout_enforcer(enforcer_timeout, False)
+        def do_test():
+            for attempt in range(port_config.retries + 1):
+                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                sock.settimeout(port_config.timeout)
+                try:
+                    result = sock.connect_ex((self.ip, port))
+                    if result == 0:
+                        if port not in self.ports:
+                            self.ports.append(port)
+                        return True
+                except Exception:
+                    pass  # Connection failed, try again if retries remain
+                finally:
+                    sock.close()
+
+                # Wait before retry (except on last attempt)
+                if attempt < port_config.retries:
+                    sleep(port_config.retry_delay)
+
+            return False
+
+        ans = do_test() or False
+        self.ports_scanned += 1
+        return ans
 
     @job_tracker
-    def scan_service(self, port: int):
+    def scan_service(self, port: int, cfg: ServiceScanConfig):
         """Scan a specific port for services."""
-        service = scan_service(self.ip, port)
+        service = scan_service(self.ip, port, cfg)
         service_ports = self.services.get(service, [])
         service_ports.append(port)
         self.services[service] = service_ports
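
`test_port` now takes its timeout, retry count, and retry delay from `PortScanConfig` and wraps the whole loop in `timeout_enforcer` with an overall budget of `timeout * (retries + 1) * 1.5`. A dependency-free sketch of the same connect-and-retry loop (the names here are illustrative, not part of the package):

```python
import socket
from time import sleep


def tcp_port_open(ip: str, port: int, timeout: float = 1.0,
                  retries: int = 1, retry_delay: float = 0.2) -> bool:
    """Standalone sketch of the connect_ex retry loop used by Device.test_port above."""
    for attempt in range(retries + 1):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(timeout)
        try:
            if sock.connect_ex((ip, port)) == 0:
                return True           # TCP handshake completed: the port is open
        except OSError:
            pass                      # transient failure; retry if attempts remain
        finally:
            sock.close()
        if attempt < retries:
            sleep(retry_delay)
    return False


print(tcp_port_open("127.0.0.1", 22, timeout=0.5))
```
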

@@ -473,3 +566,4 @@ def is_arp_supported():
         return True
     except (Scapy_Exception, PermissionError, RuntimeError):
         return False
+