souleyez 2.22.0-py3-none-any.whl → 2.27.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of souleyez might be problematic.
- souleyez/__init__.py +1 -1
- souleyez/assets/__init__.py +1 -0
- souleyez/assets/souleyez-icon.png +0 -0
- souleyez/core/msf_sync_manager.py +15 -5
- souleyez/core/tool_chaining.py +126 -26
- souleyez/detection/validator.py +4 -2
- souleyez/docs/README.md +2 -2
- souleyez/docs/user-guide/configuration.md +1 -1
- souleyez/docs/user-guide/installation.md +14 -1
- souleyez/engine/background.py +620 -154
- souleyez/engine/result_handler.py +262 -1
- souleyez/engine/worker_manager.py +98 -2
- souleyez/main.py +103 -4
- souleyez/parsers/crackmapexec_parser.py +101 -43
- souleyez/parsers/dnsrecon_parser.py +50 -35
- souleyez/parsers/enum4linux_parser.py +101 -21
- souleyez/parsers/http_fingerprint_parser.py +319 -0
- souleyez/parsers/hydra_parser.py +56 -5
- souleyez/parsers/impacket_parser.py +123 -44
- souleyez/parsers/john_parser.py +47 -14
- souleyez/parsers/msf_parser.py +20 -5
- souleyez/parsers/nmap_parser.py +48 -27
- souleyez/parsers/smbmap_parser.py +39 -23
- souleyez/parsers/sqlmap_parser.py +18 -9
- souleyez/parsers/theharvester_parser.py +21 -13
- souleyez/plugins/http_fingerprint.py +598 -0
- souleyez/plugins/nuclei.py +41 -17
- souleyez/ui/interactive.py +99 -7
- souleyez/ui/setup_wizard.py +93 -5
- souleyez/ui/tool_setup.py +52 -52
- souleyez/utils/tool_checker.py +45 -5
- {souleyez-2.22.0.dist-info → souleyez-2.27.0.dist-info}/METADATA +16 -3
- {souleyez-2.22.0.dist-info → souleyez-2.27.0.dist-info}/RECORD +37 -33
- {souleyez-2.22.0.dist-info → souleyez-2.27.0.dist-info}/WHEEL +0 -0
- {souleyez-2.22.0.dist-info → souleyez-2.27.0.dist-info}/entry_points.txt +0 -0
- {souleyez-2.22.0.dist-info → souleyez-2.27.0.dist-info}/licenses/LICENSE +0 -0
- {souleyez-2.22.0.dist-info → souleyez-2.27.0.dist-info}/top_level.txt +0 -0
souleyez/parsers/crackmapexec_parser.py

@@ -72,42 +72,61 @@ def _parse_content(content: str, target: str) -> Dict[str, Any]:
         'auth_info': {}
     }

+    # Remove ANSI color codes first
+    content = re.sub(r'\x1b\[[0-9;]*m', '', content)
+
     for line in content.split('\n'):
         # Parse host information (Windows OR Unix/Samba)
-        # Format
-
-
+        # Format variations:
+        # SMB 10.0.0.88 445 HOSTNAME [*] Windows/Unix ... (name:HOSTNAME) (domain:DOMAIN) ...
+        # SMB 10.0.0.88 445 HOSTNAME [*] Windows Server 2016 ...
+        # WINRM 10.0.0.88 5985 HOSTNAME [*] http://10.0.0.88:5985/wsman
+
+        os_keywords = ['Windows', 'Unix', 'Samba', 'Linux', 'Server', 'Microsoft']
+        if any(proto in line for proto in ['SMB', 'WINRM', 'SSH', 'RDP']) and '[*]' in line:
+            # Try multiple patterns for host info
+            host_match = None
+
+            # Pattern 1: Standard format with flexible whitespace
+            host_match = re.search(r'(\d+\.\d+\.\d+\.\d+)\s+(\d+)\s+(\S+)\s+\[\*\]\s*(.+)', line)
+
+            # Pattern 2: Protocol prefix format
+            if not host_match:
+                host_match = re.search(r'(?:SMB|WINRM|SSH|RDP)\s+(\d+\.\d+\.\d+\.\d+)\s+(\d+)\s+(\S+)\s+\[\*\]\s*(.+)', line)
+
             if host_match:
                 ip = host_match.group(1)
                 port = int(host_match.group(2))
                 hostname = host_match.group(3)
                 details = host_match.group(4).strip()

-                … (25 removed lines truncated in this view)
+                # Only process as host info if it looks like OS/version info
+                if any(kw in details for kw in os_keywords) or '(domain:' in details:
+                    # Extract domain from (domain:DOMAIN) or domain: pattern
+                    domain_match = re.search(r'\(?domain:?\s*([^)\s]+)\)?', details, re.IGNORECASE)
+                    domain = domain_match.group(1) if domain_match else None
+
+                    # Extract OS info (everything before the first parenthesis)
+                    os_match = re.match(r'([^(]+)', details)
+                    os_info = os_match.group(1).strip() if os_match else details
+
+                    # Extract SMB signing status (multiple formats)
+                    signing_match = re.search(r'\(?signing:?\s*(\w+)\)?', details, re.IGNORECASE)
+                    signing = signing_match.group(1) if signing_match else None
+
+                    # Extract SMBv1 status
+                    smbv1_match = re.search(r'\(?SMBv1:?\s*(\w+)\)?', details, re.IGNORECASE)
+                    smbv1 = smbv1_match.group(1) if smbv1_match else None
+
+                    findings['hosts'].append({
+                        'ip': ip,
+                        'port': port,
+                        'hostname': hostname,
+                        'domain': domain,
+                        'os': os_info,
+                        'signing': signing,
+                        'smbv1': smbv1
+                    })

         # Parse authentication status
         # Format: SMB 10.0.0.14 445 HOSTNAME [+] \: (Guest)
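
For reference, a minimal standalone sketch of how the new host-banner patterns behave. It reuses the regexes added above; the sample line, values, and variable names are illustrative, not taken from the package:

import re

# Illustrative CrackMapExec-style banner line (hypothetical, not real scan output)
line = "SMB    10.0.0.88    445    DC01    [*] Windows Server 2016 Standard 14393 x64 (name:DC01) (domain:EXAMPLE) (signing:True) (SMBv1:False)"

host_match = re.search(r'(\d+\.\d+\.\d+\.\d+)\s+(\d+)\s+(\S+)\s+\[\*\]\s*(.+)', line)
if host_match:
    details = host_match.group(4).strip()
    domain = re.search(r'\(?domain:?\s*([^)\s]+)\)?', details, re.IGNORECASE)
    signing = re.search(r'\(?signing:?\s*(\w+)\)?', details, re.IGNORECASE)
    print(host_match.group(1), host_match.group(3))   # 10.0.0.88 DC01
    print(domain.group(1), signing.group(1))          # EXAMPLE True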
@@ -122,13 +141,23 @@ def _parse_content(content: str, target: str) -> Dict[str, Any]:
            }

        # Parse share enumeration (shares WITH permissions)
-        … (7 removed lines truncated in this view)
+        # Format variations:
+        # SMB ... ADMIN$ READ,WRITE Remote Admin
+        # SMB ... ADMIN$ READ, WRITE Remote Admin (with space)
+        # SMB ... C$ READ ONLY Default share
+        share_perm_match = re.search(
+            r'SMB.*\s+(\S+\$?)\s+(READ,?\s*WRITE|READ\s*ONLY|WRITE\s*ONLY|READ|WRITE|NO\s*ACCESS)\s*(.*)$',
+            line, re.IGNORECASE
+        )
+        if share_perm_match:
+            share_name = share_perm_match.group(1)
+            # Skip if it looks like a header or status line
+            if share_name not in ['Share', 'Permissions', 'shares']:
+                findings['shares'].append({
+                    'name': share_name,
+                    'permissions': share_perm_match.group(2).upper().replace(' ', ''),
+                    'comment': share_perm_match.group(3).strip() if share_perm_match.group(3) else ''
+                })
         # Parse share enumeration (shares WITHOUT explicit permissions - just listed)
         elif 'SMB' in line and not ('Share' in line and 'Permissions' in line) and not '-----' in line:
             # Look for lines with share names (ending with $, or common names like print$, public, IPC$)
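
As a sanity check, a minimal sketch of the share-permission pattern added above applied to one illustrative line (the sample output is hypothetical):

import re

share_line = "SMB    10.0.0.88    445    DC01    ADMIN$          READ,WRITE      Remote Admin"
m = re.search(
    r'SMB.*\s+(\S+\$?)\s+(READ,?\s*WRITE|READ\s*ONLY|WRITE\s*ONLY|READ|WRITE|NO\s*ACCESS)\s*(.*)$',
    share_line, re.IGNORECASE
)
if m and m.group(1) not in ['Share', 'Permissions', 'shares']:
    # Prints: ADMIN$ READ,WRITE Remote Admin
    print(m.group(1), m.group(2).upper().replace(' ', ''), m.group(3).strip())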
@@ -146,9 +175,16 @@ def _parse_content(content: str, target: str) -> Dict[str, Any]:
                    'comment': remark
                })

-        # Parse user enumeration
-
-
+        # Parse user enumeration with flexible format
+        # Format variations:
+        # username badpwdcount: 0 desc: Description
+        # username badpwdcount:0 desc:Description
+        # username baddpwdcount: 0 description: Description
+        if 'badpwdcount' in line.lower() or 'baddpwdcount' in line.lower():
+            user_match = re.search(
+                r'(\S+)\s+bad+pwdcount:?\s*(\d+)\s+(?:desc(?:ription)?:?\s*)?(.+)?',
+                line, re.IGNORECASE
+            )
             if user_match:
                 findings['users'].append({
                     'username': user_match.group(1),
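
A small sketch of the flexible user-enumeration pattern above, run against one hypothetical line (note that bad+ also tolerates the "baddpwdcount" misspelling):

import re

user_line = "jsmith      badpwdcount: 0      desc: IT helpdesk account"
m = re.search(r'(\S+)\s+bad+pwdcount:?\s*(\d+)\s+(?:desc(?:ription)?:?\s*)?(.+)?',
              user_line, re.IGNORECASE)
if m:
    # Prints: jsmith 0 IT helpdesk account
    print(m.group(1), m.group(2), (m.group(3) or '').strip())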
@@ -165,15 +201,37 @@ def _parse_content(content: str, target: str) -> Dict[str, Any]:
                })

        # Parse valid credentials (but not Guest authentication)
-
-
+        # Format variations:
+        # [+] DOMAIN\username:password (Pwn3d!)
+        # [+] DOMAIN\\username:password (Pwn3d!)
+        # [+] username:password (Pwn3d!)
+        # [+] DOMAIN/username:password (Pwn3d!)
+        if '[+]' in line and ('Pwn3d' in line or ':' in line):
+            # Try domain\user:pass format first
+            cred_match = re.search(
+                r'\[\+\]\s*([^\\/:]+)[\\\/]+([^:]+):([^\s(]+)\s*(\(Pwn3d!?\))?',
+                line, re.IGNORECASE
+            )
             if cred_match:
                 findings['credentials'].append({
-                    'domain': cred_match.group(1),
-                    'username': cred_match.group(2),
-                    'password': cred_match.group(3),
+                    'domain': cred_match.group(1).strip(),
+                    'username': cred_match.group(2).strip(),
+                    'password': cred_match.group(3).strip(),
                     'admin': bool(cred_match.group(4))
                 })
+            else:
+                # Try user:pass format (no domain)
+                cred_match = re.search(
+                    r'\[\+\]\s*([^:@\s]+):([^\s(]+)\s*(\(Pwn3d!?\))?',
+                    line, re.IGNORECASE
+                )
+                if cred_match and '@' not in cred_match.group(1):
+                    findings['credentials'].append({
+                        'domain': '',
+                        'username': cred_match.group(1).strip(),
+                        'password': cred_match.group(2).strip(),
+                        'admin': bool(cred_match.group(3))
+                    })

     # Extract admin credentials for auto-chaining
     admin_creds = [c for c in findings['credentials'] if c.get('admin')]
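
The two credential patterns above can be exercised standalone; the sample lines below are illustrative, and the fallback only fires when the domain-qualified pattern misses:

import re

DOMAIN_CRED = r'\[\+\]\s*([^\\/:]+)[\\\/]+([^:]+):([^\s(]+)\s*(\(Pwn3d!?\))?'
BARE_CRED = r'\[\+\]\s*([^:@\s]+):([^\s(]+)\s*(\(Pwn3d!?\))?'

for cred_line in (r"SMB  10.0.0.88  445  DC01  [+] EXAMPLE\administrator:Winter2024! (Pwn3d!)",
                  "[+] backup:Spring2025"):
    m = re.search(DOMAIN_CRED, cred_line, re.IGNORECASE)
    if m:
        print(m.group(1).strip(), m.group(2).strip(), m.group(3).strip(), bool(m.group(4)))
    else:
        m = re.search(BARE_CRED, cred_line, re.IGNORECASE)
        if m and '@' not in m.group(1):
            print('', m.group(1), m.group(2), bool(m.group(3)))
# Output: EXAMPLE administrator Winter2024! True
#         backup Spring2025 False   (empty domain field prints first)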
souleyez/parsers/dnsrecon_parser.py

@@ -56,47 +56,62 @@ def parse_dnsrecon_output(output: str, target: str = "") -> Dict[str, Any]:
        if not line_stripped or line_stripped.startswith('[-]'):
            continue

-        # Parse
-        #
-        #
-        #
-
+        # Parse DNS records from dnsrecon output
+        # Old format: [*] A cybersoulsecurity.com 198.185.159.144
+        # New format: 2026-01-08T13:50:16.302153-1000 INFO SOA dns1.p01.nsone.net 198.51.44.1
+        # New format: 2026-01-08T13:50:17.112742-1000 INFO NS dns4.p01.nsone.net 198.51.45.65
+
+        record_type = None
+        hostname = None
+        ip = None

        if line_stripped.startswith('[*]'):
+            # Old format: [*] <type> <hostname> <ip>
            parts = line_stripped.split()
-            if len(parts) >= 4:
+            if len(parts) >= 4:
                record_type = parts[1]
                hostname = parts[2].lower()
                ip = parts[3] if len(parts) > 3 else ''
-            … (29 removed lines truncated in this view)
+        elif ' INFO ' in line_stripped:
+            # New format: TIMESTAMP INFO <type> <hostname> <ip>
+            # Split on INFO and parse the rest
+            info_idx = line_stripped.find(' INFO ')
+            if info_idx != -1:
+                record_part = line_stripped[info_idx + 6:].strip()
+                parts = record_part.split()
+                if len(parts) >= 3:
+                    record_type = parts[0]
+                    hostname = parts[1].lower()
+                    ip = parts[2] if len(parts) > 2 else ''
+
+        if record_type and hostname:
+            # Validate IP (both IPv4 and basic IPv6)
+            is_ipv4 = re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip) if ip else False
+
+            if record_type == 'A' and is_ipv4:
+                if hostname not in seen_hosts:
+                    seen_hosts.add(hostname)
+                    result['hosts'].append({
+                        'hostname': hostname,
+                        'ip': ip,
+                        'type': 'A'
+                    })
+                if hostname != target and hostname not in seen_subdomains:
+                    seen_subdomains.add(hostname)
+                    result['subdomains'].append(hostname)
+
+            elif record_type == 'NS':
+                if hostname not in result['nameservers']:
+                    result['nameservers'].append(hostname)
+
+            elif record_type == 'MX':
+                if hostname not in result['mail_servers']:
+                    result['mail_servers'].append(hostname)
+
+            elif record_type == 'SOA':
+                # SOA records can also be nameservers
+                if hostname not in result['nameservers']:
+                    result['nameservers'].append(hostname)

        # Parse subdomain brute force results: [*] Subdomain: api.example.com IP: 1.2.3.4
        subdomain_match = re.search(r'Subdomain:\s+(\S+)\s+IP:\s+(\d+\.\d+\.\d+\.\d+)', line_stripped)
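
A condensed sketch of the two dnsrecon line formats handled above; the function name and sample lines are illustrative only:

def classify_dnsrecon_line(line_stripped):
    # Old "[*] <type> <host> <ip>" format vs. new timestamped "INFO <type> <host> <ip>" format
    record_type = hostname = ip = None
    if line_stripped.startswith('[*]'):
        parts = line_stripped.split()
        if len(parts) >= 4:
            record_type, hostname, ip = parts[1], parts[2].lower(), parts[3]
    elif ' INFO ' in line_stripped:
        record_part = line_stripped[line_stripped.find(' INFO ') + 6:].strip()
        parts = record_part.split()
        if len(parts) >= 3:
            record_type, hostname, ip = parts[0], parts[1].lower(), parts[2]
    return record_type, hostname, ip

print(classify_dnsrecon_line("[*] A example.com 198.51.100.10"))
print(classify_dnsrecon_line("2026-01-08T13:50:16.302153-1000 INFO SOA dns1.p01.nsone.net 198.51.44.1"))
# ('A', 'example.com', '198.51.100.10')
# ('SOA', 'dns1.p01.nsone.net', '198.51.44.1')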
souleyez/parsers/enum4linux_parser.py

@@ -51,16 +51,38 @@ def parse_enum4linux_output(output: str, target: str = "") -> Dict[str, Any]:

 def _is_enum4linux_ng_output(output: str) -> bool:
     """Detect if output is from enum4linux-ng (YAML-style format)."""
-    #
+    # Primary indicator - explicit version string (most reliable)
+    if re.search(r'ENUM4LINUX\s*-\s*next\s*generation', output, re.IGNORECASE):
+        return True
+    if re.search(r'enum4linux-ng', output, re.IGNORECASE):
+        return True
+
+    # Secondary indicators - look for YAML-style patterns unique to ng
     ng_indicators = [
-        re.search(r'
-        re.search(r'
-        re.search(r'
-        re.search(r'^\s
-        re.search(r
+        re.search(r'After merging (user|share|group) results', output, re.IGNORECASE),
+        re.search(r'^\s{2,}username:\s+', output, re.MULTILINE),  # Indented YAML-style
+        re.search(r"^'?\d+'?:\s*$", output, re.MULTILINE),  # RID entries: '1000': or 1000:
+        re.search(r'^\s{2,}(groupname|name|type|comment):\s+', output, re.MULTILINE),
+        re.search(r'Trying to get SID from lsaquery', output, re.IGNORECASE),
+    ]
+
+    # Classic enum4linux indicators (to confirm it's NOT ng)
+    classic_indicators = [
+        re.search(r'enum4linux v\d', output, re.IGNORECASE),
+        re.search(r'Starting enum4linux v', output, re.IGNORECASE),
+        re.search(r'Sharename\s+Type\s+Comment', output),  # Table header
+        re.search(r'\|\s+Users on', output),
     ]
-
-
+
+    ng_count = sum(1 for ind in ng_indicators if ind)
+    classic_count = sum(1 for ind in classic_indicators if ind)
+
+    # If we have classic indicators and no/few ng indicators, it's classic
+    if classic_count >= 2 and ng_count < 2:
+        return False
+
+    # If we find at least 2 ng indicators, it's probably enum4linux-ng
+    return ng_count >= 2


 def _parse_enum4linux_ng_output(output: str, target: str = "") -> Dict[str, Any]:
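
The detection heuristic above boils down to: an explicit version banner decides immediately, otherwise format indicators are counted for each tool. A condensed sketch under that assumption (indicator lists trimmed for brevity, sample outputs synthetic):

import re

def looks_like_enum4linux_ng(output):
    if re.search(r'ENUM4LINUX\s*-\s*next\s*generation', output, re.IGNORECASE):
        return True
    ng_count = sum(1 for pat in (r'After merging (user|share|group) results',
                                 r'^\s{2,}username:\s+')
                   if re.search(pat, output, re.IGNORECASE | re.MULTILINE))
    classic_count = sum(1 for pat in (r'Starting enum4linux v',
                                      r'Sharename\s+Type\s+Comment')
                        if re.search(pat, output))
    if classic_count >= 2 and ng_count < 2:
        return False
    return ng_count >= 2

classic_sample = "Starting enum4linux v0.9.1\n Sharename       Type      Comment\n"
print(looks_like_enum4linux_ng(classic_sample))                  # False
print(looks_like_enum4linux_ng("ENUM4LINUX - next generation"))  # True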
@@ -297,21 +319,41 @@ def _parse_enum4linux_classic_output(output: str, target: str = "") -> Dict[str, Any]:

        # Parse user lines from RID cycling output (Local User or Domain User)
        elif current_section == 'users' and line and not line.startswith('='):
-            # Format:
-
+            # Format variations:
+            # "S-1-5-21-...-RID DOMAIN\username (Local User)"
+            # "S-1-5-21-...-RID DOMAIN\\username (Local User)"
+            # "username (Local User)" - simplified format
+            # "[+] DOMAIN\username" - alternate prefix
+
+            # Try full SID format first (flexible escaping)
+            user_match = re.search(r'S-1-5-21-[\d-]+\s+\S+[\\]+(\S+)\s+\((Local|Domain)\s*User\)', line, re.IGNORECASE)
            if user_match:
                username = user_match.group(1)
-                if username not in result['users']:
+                if username and username not in result['users']:
                    result['users'].append(username)
-
-
+            else:
+                # Try simpler DOMAIN\username format
+                user_match = re.search(r'[\[\+\]\s]*\S+[\\]+(\S+)\s+\((Local|Domain)\s*User\)', line, re.IGNORECASE)
+                if user_match:
+                    username = user_match.group(1)
+                    if username and username not in result['users']:
+                        result['users'].append(username)
+
+        # Also parse group lines from RID cycling (Domain Group, Local Group)
        elif current_section == 'groups' and line and not line.startswith('='):
-            # Format
-            group_match = re.
+            # Format variations similar to users
+            group_match = re.search(r'S-1-5-21-[\d-]+\s+\S+[\\]+(\S+)\s+\((Domain|Local)\s*Group\)', line, re.IGNORECASE)
            if group_match:
                groupname = group_match.group(1)
-                if groupname not in result['groups']:
+                if groupname and groupname not in result['groups']:
                    result['groups'].append(groupname)
+            else:
+                # Try simpler format
+                group_match = re.search(r'[\[\+\]\s]*\S+[\\]+(\S+)\s+\((Domain|Local)\s*Group\)', line, re.IGNORECASE)
+                if group_match:
+                    groupname = group_match.group(1)
+                    if groupname and groupname not in result['groups']:
+                        result['groups'].append(groupname)

    return result

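
For illustration, the flexible-escaping SID pattern and its simpler fallback from the hunk above, run against hypothetical RID-cycling lines:

import re

SID_USER = r'S-1-5-21-[\d-]+\s+\S+[\\]+(\S+)\s+\((Local|Domain)\s*User\)'
SIMPLE_USER = r'[\[\+\]\s]*\S+[\\]+(\S+)\s+\((Local|Domain)\s*User\)'

full = r"S-1-5-21-1004336348-1177238915-682003330-1000 EXAMPLE\jsmith (Local User)"
short = r"[+] EXAMPLE\operator (Domain User)"

print(re.search(SID_USER, full, re.IGNORECASE).groups())      # ('jsmith', 'Local')
print(re.search(SIMPLE_USER, short, re.IGNORECASE).groups())  # ('operator', 'Domain')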
@@ -322,23 +364,61 @@ def _parse_share_line(line: str) -> Dict[str, Any]:

     Example: "print$ Disk Printer Drivers"
     Example: "tmp Disk oh noes!"
+    Example: "IPC$ IPC IPC Service (Samba)"
     """
-
-
+    line = line.strip()
+    if not line:
+        return None

+    # Try multiple parsing strategies for different formats
+
+    # Strategy 1: Split on 2+ whitespace (most common)
+    parts = re.split(r'\s{2,}', line)
+    if len(parts) >= 2:
+        share_name = parts[0].strip()
+        share_type = parts[1].strip()
+        comment = parts[2].strip() if len(parts) > 2 else ''
+
+        # Validate share type is a known type
+        if share_type.upper() in ['DISK', 'IPC', 'PRINT', 'PRINTER', 'COMM', 'DEVICE']:
+            return {
+                'name': share_name,
+                'type': share_type,
+                'comment': comment,
+                'mapping': None,
+                'listing': None,
+                'writing': None
+            }
+
+    # Strategy 2: Tab-separated
+    parts = line.split('\t')
     if len(parts) >= 2:
         share_name = parts[0].strip()
         share_type = parts[1].strip()
         comment = parts[2].strip() if len(parts) > 2 else ''

+        if share_type.upper() in ['DISK', 'IPC', 'PRINT', 'PRINTER', 'COMM', 'DEVICE']:
+            return {
+                'name': share_name,
+                'type': share_type,
+                'comment': comment,
+                'mapping': None,
+                'listing': None,
+                'writing': None
+            }
+
+    # Strategy 3: Regex for flexible whitespace (single space minimum)
+    match = re.match(r'^(\S+)\s+(Disk|IPC|Print|Printer|Comm|Device)\s*(.*)?$', line, re.IGNORECASE)
+    if match:
         return {
-            'name':
-            'type':
-            'comment':
+            'name': match.group(1),
+            'type': match.group(2),
+            'comment': match.group(3).strip() if match.group(3) else '',
             'mapping': None,
             'listing': None,
             'writing': None
         }
+
     return None

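
To close, a condensed sketch of the strategy cascade in _parse_share_line above (tab handling omitted; sample lines are illustrative):

import re

KNOWN_TYPES = {'DISK', 'IPC', 'PRINT', 'PRINTER', 'COMM', 'DEVICE'}

def parse_share_line_sketch(line):
    line = line.strip()
    if not line:
        return None
    # Strategy 1: columns separated by two or more spaces
    parts = re.split(r'\s{2,}', line)
    if len(parts) >= 2 and parts[1].strip().upper() in KNOWN_TYPES:
        return {'name': parts[0].strip(), 'type': parts[1].strip(),
                'comment': parts[2].strip() if len(parts) > 2 else ''}
    # Strategy 3: single-space fallback with an explicit type alternation
    m = re.match(r'^(\S+)\s+(Disk|IPC|Print|Printer|Comm|Device)\s*(.*)?$', line, re.IGNORECASE)
    if m:
        return {'name': m.group(1), 'type': m.group(2),
                'comment': m.group(3).strip() if m.group(3) else ''}
    return None

print(parse_share_line_sketch("print$          Disk      Printer Drivers"))
print(parse_share_line_sketch("IPC$ IPC IPC Service (Samba)"))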