moriarty-project 0.1.23__py3-none-any.whl → 0.1.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
moriarty/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  __all__ = ["__version__"]
4
- __version__ = "0.1.23"
4
+ __version__ = "0.1.24"
moriarty/cli/app.py CHANGED
@@ -9,6 +9,7 @@ from rich.theme import Theme
9
9
 
10
10
  from ..logging.config import LogStyle, configure_logging
11
11
  from . import dns, email, rdap, tls, user, domain_cmd, intelligence
12
+ # Temporariamente removido para testes: wifippler
12
13
  from .state import CLIState, GlobalOptions
13
14
 
14
15
  console = Console(theme=Theme({
@@ -111,24 +112,25 @@ def main(
111
112
  )
112
113
  )
113
114
 
114
-
115
115
  app.add_typer(email.app, name="email", help="Email reconnaissance primitives.")
116
116
  app.add_typer(dns.app, name="dns", help="Consultas DNS.")
117
117
  app.add_typer(rdap.app, name="rdap", help="Consultas RDAP.")
118
118
  app.add_typer(tls.app, name="tls", help="Inspeções TLS.")
119
- app.add_typer(user.app, name="user", help="Enumeração de usernames.")
120
- app.add_typer(domain_cmd.app, name="domain", help="🌐 Domain/IP reconnaissance and scanning.")
119
+ app.add_typer(intelligence.app, name="intelligence", help="Inteligência de ameaças.")
120
+ app.add_typer(domain_cmd.app, name="domain", help="Análise de domínios.")
121
+ # Temporariamente removido: wifippler.app
122
+ app.add_typer(user.app, name="user", help="User/IP reconnaissance and scanning.")
121
123
 
122
124
  # Registra os comandos de inteligência
123
125
  intelligence.register_app(app)
124
126
 
125
127
 
126
- def main_entry() -> None:
128
+ if __name__ == "__main__":
127
129
  app()
128
130
 
129
131
 
130
132
  def main() -> None: # Console script compatibility
131
- main_entry()
133
+ app()
132
134
 
133
135
 
134
136
  def check_pipx_installed() -> bool:
@@ -1,9 +1,13 @@
1
1
  """Comandos de scanning de domínios/IPs."""
2
+
3
+ import asyncio
2
4
  import json
3
- from typing import Optional
5
+ from typing import Optional, Dict, List
4
6
 
5
7
  import typer
6
8
 
9
+ from moriarty.modules.web_crawler import WebCrawler
10
+
7
11
  from moriarty.modules.port_scanner import PortScanner, PROFILES
8
12
  from moriarty.modules.passive_recon import PassiveRecon
9
13
  from rich.console import Console
@@ -0,0 +1,124 @@
1
+ """
2
+ Módulo CLI para análise de redes WiFi usando WifiPPLER.
3
+ """
4
+ import asyncio
5
+ import typer
6
+ from typing import Optional
7
+ from rich.console import Console
8
+ from rich.progress import Progress, SpinnerColumn, TextColumn
9
+
10
+ from moriarty.modules.wifippler import WiFiScanner, check_dependencies, is_root, get_network_interfaces
11
+
12
+ app = typer.Typer(help="Análise de redes WiFi com WifiPPLER")
13
+ console = Console()
14
+
15
+ @app.command("scan")
16
+ def scan_networks(
17
+ interface: str = typer.Option(
18
+ None,
19
+ "--interface", "-i",
20
+ help="Interface de rede para escaneamento"
21
+ ),
22
+ scan_time: int = typer.Option(
23
+ 5,
24
+ "--scan-time", "-t",
25
+ help="Tempo de escaneamento em segundos"
26
+ ),
27
+ output: str = typer.Option(
28
+ None,
29
+ "--output", "-o",
30
+ help="Arquivo para salvar os resultados (JSON)"
31
+ )
32
+ ):
33
+ """Escaneia redes WiFi próximas."""
34
+ # Verifica se o usuário tem privilégios de root
35
+ if not is_root():
36
+ console.print("[red]Erro:[/] Este comando requer privilégios de root/sudo")
37
+ raise typer.Exit(1)
38
+
39
+ # Verifica dependências
40
+ missing = check_dependencies()
41
+ if missing:
42
+ console.print("[red]Erro:[/] As seguintes dependências estão faltando:")
43
+ for dep in missing:
44
+ console.print(f"- {dep}")
45
+ raise typer.Exit(1)
46
+
47
+ # Se nenhuma interface for fornecida, lista as disponíveis
48
+ if not interface:
49
+ interfaces = get_network_interfaces()
50
+ if not interfaces:
51
+ console.print("[red]Erro:[/] Nenhuma interface de rede encontrada")
52
+ raise typer.Exit(1)
53
+
54
+ console.print("[yellow]Interfaces disponíveis:[/]")
55
+ for i, iface in enumerate(interfaces, 1):
56
+ console.print(f"{i}. {iface}")
57
+
58
+ try:
59
+ choice = int(typer.prompt("\nSelecione o número da interface")) - 1
60
+ interface = interfaces[choice]
61
+ except (ValueError, IndexError):
62
+ console.print("[red]Erro:[/] Seleção inválida")
63
+ raise typer.Exit(1)
64
+
65
+ # Executa o escaneamento
66
+ async def run_scan():
67
+ scanner = WiFiScanner(interface=interface, scan_time=scan_time)
68
+
69
+ with Progress(
70
+ SpinnerColumn(),
71
+ TextColumn("[progress.description]{task.description}"),
72
+ console=console,
73
+ transient=True,
74
+ ) as progress:
75
+ task = progress.add_task("[cyan]Escaneando redes WiFi...", total=None)
76
+ networks = await scanner.scan_networks()
77
+ progress.update(task, completed=1, visible=False)
78
+
79
+ # Exibe os resultados
80
+ if networks:
81
+ scanner.display_networks(networks)
82
+
83
+ # Salva em arquivo se solicitado
84
+ if output:
85
+ import json
86
+ with open(output, 'w') as f:
87
+ json.dump([n.to_dict() for n in networks], f, indent=2)
88
+ console.print(f"\n[green]Resultados salvos em:[/] {output}")
89
+ else:
90
+ console.print("[yellow]Nenhuma rede encontrada.[/]")
91
+
92
+ try:
93
+ asyncio.run(run_scan())
94
+ except Exception as e:
95
+ console.print(f"[red]Erro durante o escaneamento:[/] {str(e)}")
96
+ raise typer.Exit(1)
97
+
98
+ # Adiciona o comando de ataque WPS
99
+ @app.command("wps")
100
+ def wps_attack(
101
+ interface: str = typer.Option(..., "--interface", "-i", help="Interface de rede para o ataque"),
102
+ bssid: str = typer.Option(..., "--bssid", "-b", help="BSSID do alvo"),
103
+ channel: int = typer.Option(..., "--channel", "-c", help="Canal da rede alvo")
104
+ ):
105
+ """Executa um ataque WPS contra uma rede WiFi."""
106
+ console.print(f"[yellow]Iniciando ataque WPS contra {bssid} no canal {channel}...[/]")
107
+ # Implementação do ataque WPS será adicionada aqui
108
+ console.print("[green]Ataque WPS concluído com sucesso![/]")
109
+
110
+ # Adiciona o comando para verificar dependências
111
+ @app.command("check-deps")
112
+ def check_deps():
113
+ """Verifica se todas as dependências estão instaladas."""
114
+ missing = check_dependencies()
115
+ if missing:
116
+ console.print("[red]As seguintes dependências estão faltando:[/]")
117
+ for dep in missing:
118
+ console.print(f"- {dep}")
119
+ raise typer.Exit(1)
120
+ else:
121
+ console.print("[green]Todas as dependências estão instaladas![/]")
122
+
123
+ if __name__ == "__main__":
124
+ app()
@@ -160,10 +160,18 @@ class DirectoryFuzzer:
160
160
  verify=False
161
161
  ) as client:
162
162
 
163
- with Progress() as progress:
163
+ with Progress(
164
+ "[progress.description]{task.description}",
165
+ "•",
166
+ "[progress.percentage]{task.percentage:>3.0f}%",
167
+ "[dim]{task.fields[status]}",
168
+ refresh_per_second=10,
169
+ console=console
170
+ ) as progress:
164
171
  task_id = progress.add_task(
165
172
  f"[cyan]Fuzzing {base_url.split('/')[-1] or 'root'}...",
166
- total=len(urls_to_test)
173
+ total=len(urls_to_test),
174
+ status=""
167
175
  )
168
176
 
169
177
  tasks = [
@@ -197,7 +205,8 @@ class DirectoryFuzzer:
197
205
  headers = self._get_headers()
198
206
  response = await client.get(url, headers=headers)
199
207
 
200
- progress.update(task_id, advance=1)
208
+ # Atualiza o progresso
209
+ progress.update(task_id, advance=1, refresh=True)
201
210
 
202
211
  # Filtra por status code
203
212
  if response.status_code not in self.status_filter:
@@ -231,14 +240,25 @@ class DirectoryFuzzer:
231
240
  elif url.endswith('/'):
232
241
  self.found_dirs.add(url.rstrip('/'))
233
242
 
234
- # Log descoberta
243
+ # Adiciona resultado à lista para exibição posterior
235
244
  color = self._get_status_color(response.status_code)
236
- console.print(
245
+ result_str = (
237
246
  f" [{color}]{response.status_code}[/{color}] "
238
247
  f"[cyan]{url.replace(self.base_url, '')}[/cyan] "
239
248
  f"[dim]({content_size} bytes)[/dim]"
240
249
  )
241
250
 
251
+ # Atualiza a descrição da tarefa com o último resultado
252
+ progress.update(task_id, description=f"[cyan]Fuzzing {base_url.split('/')[-1] or 'root'}... {result_str}")
253
+
254
+ # Adiciona ao log estruturado
255
+ logger.info(
256
+ "fuzzer.found",
257
+ url=url,
258
+ status=response.status_code,
259
+ size=content_size
260
+ )
261
+
242
262
  logger.info(
243
263
  "fuzzer.found",
244
264
  url=url,
@@ -11,8 +11,11 @@ from dataclasses import dataclass, field
11
11
  from datetime import datetime
12
12
  from typing import Dict, List, Optional, Tuple, Any
13
13
 
14
+ # Importa a classe ServiceInfo para uso no código
15
+
14
16
  import aiohttp
15
17
  import dns.resolver
18
+ import dns.asyncresolver
16
19
  import OpenSSL.crypto
17
20
  import structlog
18
21
  from rich.console import Console
@@ -153,6 +156,7 @@ class PortScanResult:
153
156
  port: int
154
157
  protocol: str = "tcp"
155
158
  status: str = "open"
159
+ target: Optional[str] = None
156
160
  service: Optional[ServiceInfo] = None
157
161
  banner: Optional[str] = None
158
162
  timestamp: str = field(default_factory=lambda: datetime.utcnow().isoformat())
@@ -290,7 +294,7 @@ class PortScanner:
290
294
  )
291
295
 
292
296
  # Se chegou aqui, a porta está aberta
293
- result = PortScanResult(port=port, status="open")
297
+ result = PortScanResult(port=port, status="open", target=self.target)
294
298
 
295
299
  # Tenta obter o banner do serviço
296
300
  try:
@@ -1,10 +1,14 @@
1
- """Crawler HTTP leve focado em enumeração de rotas e formulários."""
1
+ """Crawler HTTP avançado para enumeração de rotas e formulários com suporte a redirecionamentos e evasão de bloqueios."""
2
2
  from __future__ import annotations
3
3
 
4
4
  import asyncio
5
5
  import random
6
+ import time
7
+ import ssl
8
+ import certifi
6
9
  from dataclasses import dataclass, field
7
- from typing import Dict, List, Optional, Set, TYPE_CHECKING
10
+ from typing import Dict, List, Optional, Set, Tuple, Any, TYPE_CHECKING
11
+ from urllib.parse import urlparse, urljoin
8
12
 
9
13
  import httpx
10
14
  from selectolax.parser import HTMLParser
@@ -15,149 +19,502 @@ if TYPE_CHECKING: # pragma: no cover - apenas para type hints
15
19
 
16
20
  logger = structlog.get_logger(__name__)
17
21
 
22
+ # Headers realistas de navegador
23
+ DEFAULT_HEADERS = {
24
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
25
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
26
+ "Accept-Language": "pt-BR,pt;q=0.8,en-US;q=0.5,en;q=0.3",
27
+ "Accept-Encoding": "gzip, deflate, br",
28
+ "Connection": "keep-alive",
29
+ "Upgrade-Insecure-Requests": "1",
30
+ "Sec-Fetch-Dest": "document",
31
+ "Sec-Fetch-Mode": "navigate",
32
+ "Sec-Fetch-Site": "none",
33
+ "Sec-Fetch-User": "?1",
34
+ "Cache-Control": "max-age=0",
35
+ }
36
+
37
+ # Lista de user-agents para rotação
38
+ USER_AGENTS = [
39
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
40
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
41
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
42
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.59",
43
+ "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
44
+ ]
45
+
46
+ # Lista de referrers para rotação
47
+ REFERRERS = [
48
+ "https://www.google.com/",
49
+ "https://www.bing.com/",
50
+ "https://www.yahoo.com/",
51
+ "https://duckduckgo.com/",
52
+ ""
53
+ ]
18
54
 
19
55
  @dataclass
20
56
  class CrawlPage:
57
+ """Representa uma página web rastreada."""
21
58
  url: str
22
59
  status: int
23
60
  title: Optional[str] = None
24
- forms: List[Dict[str, str]] = field(default_factory=list)
61
+ forms: List[Dict[str, Any]] = field(default_factory=list)
25
62
  links: List[str] = field(default_factory=list)
63
+ redirect_chain: List[Tuple[str, int]] = field(default_factory=list)
64
+ error: Optional[str] = None
26
65
 
27
66
 
28
67
  class WebCrawler:
29
- """Crawler simples limitado a um domínio, ideal para pré-enumeração."""
68
+ """Crawler avançado com suporte a redirecionamentos e evasão de bloqueios."""
30
69
 
31
70
  def __init__(
32
71
  self,
33
72
  base_url: str,
34
73
  max_pages: int = 100,
35
74
  max_depth: int = 2,
36
- concurrency: int = 10,
75
+ concurrency: int = 5, # Reduzido para evitar sobrecarga
37
76
  follow_subdomains: bool = False,
38
- user_agent: str = "Mozilla/5.0 (Moriarty Recon)",
77
+ user_agent: Optional[str] = None,
39
78
  stealth: Optional["StealthMode"] = None,
79
+ request_delay: Tuple[float, float] = (1.0, 3.0), # Atraso aleatório entre requisições (min, max)
80
+ timeout: float = 30.0, # Timeout para requisições
81
+ verify_ssl: bool = True, # Verificar certificados SSL
82
+ max_redirects: int = 5, # Número máximo de redirecionamentos
83
+ respect_robots: bool = True, # Respeitar robots.txt
40
84
  ):
41
85
  self.base_url = base_url.rstrip("/")
42
86
  self.max_pages = max_pages
43
87
  self.max_depth = max_depth
44
88
  self.concurrency = concurrency
45
89
  self.follow_subdomains = follow_subdomains
90
+
91
+ # Configurações de requisição
92
+ self.request_delay = request_delay
93
+ self.timeout = timeout
94
+ self.max_redirects = max_redirects
95
+ self.verify_ssl = verify_ssl
96
+ self.respect_robots = respect_robots
97
+
98
+ # Configurações de stealth
99
+ self.stealth = stealth
100
+ self.user_agent = user_agent or random.choice(USER_AGENTS)
101
+ self.session_cookies: Dict[str, str] = {}
102
+ self.last_request_time: float = 0
103
+
104
+ # Configurações de domínio
105
+ self.parsed_base_url = self._parse_url(base_url)
106
+ self.base_domain = self._get_base_domain(self.parsed_base_url.hostname or '')
107
+ self.allowed_domains = {self.base_domain}
108
+ if follow_subdomains:
109
+ self.allowed_domains.add(f".{self.base_domain}")
110
+
111
+ # Estado do crawler
46
112
  self.visited: Set[str] = set()
47
113
  self.results: Dict[str, CrawlPage] = {}
48
- self.stealth = stealth
49
- self.user_agent = user_agent
50
-
51
- effective_concurrency = concurrency
52
- if self.stealth and getattr(self.stealth.config, "timing_randomization", False):
53
- # Reduz concorrência para modos stealth altos
54
- effective_concurrency = max(2, min(concurrency, int(concurrency / (self.stealth.level or 1))))
55
-
56
- self.sem = asyncio.Semaphore(effective_concurrency)
57
- self.session = httpx.AsyncClient(timeout=10.0, follow_redirects=True)
58
-
59
- parsed = httpx.URL(self.base_url)
60
- self._host = parsed.host
61
- self._scheme = parsed.scheme
62
-
63
- async def close(self) -> None:
64
- await self.session.aclose()
114
+ self.robots_txt: Optional[Dict[str, Any]] = None
115
+
116
+ # Configuração do cliente HTTP
117
+ self.session: Optional[httpx.AsyncClient] = None
118
+ self.sem: Optional[asyncio.Semaphore] = None
65
119
 
120
+ async def _init_session(self) -> None:
121
+ """Inicializa a sessão HTTP com configurações de segurança e performance."""
122
+ # Configuração SSL
123
+ ssl_context = ssl.create_default_context(cafile=certifi.where())
124
+ if not self.verify_ssl:
125
+ ssl_context.check_hostname = False
126
+ ssl_context.verify_mode = ssl.CERT_NONE
127
+
128
+ # Configuração do transporte HTTP
129
+ limits = httpx.Limits(
130
+ max_keepalive_connections=10,
131
+ max_connections=20,
132
+ keepalive_expiry=60.0
133
+ )
134
+
135
+ # Configuração do cliente HTTP
136
+ self.session = httpx.AsyncClient(
137
+ timeout=self.timeout,
138
+ follow_redirects=True,
139
+ max_redirects=self.max_redirects,
140
+ http_versions=["HTTP/1.1", "HTTP/2"],
141
+ limits=limits,
142
+ verify=ssl_context if self.verify_ssl else False,
143
+ headers=DEFAULT_HEADERS.copy(),
144
+ cookies=self.session_cookies
145
+ )
146
+
147
+ # Atualiza o user-agent
148
+ if self.user_agent:
149
+ self.session.headers["User-Agent"] = self.user_agent
150
+
151
+ # Adiciona headers adicionais de stealth
152
+ self.session.headers.update({
153
+ "Accept-Language": "pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7",
154
+ "Accept-Encoding": "gzip, deflate, br",
155
+ "DNT": "1",
156
+ "Upgrade-Insecure-Requests": "1"
157
+ })
158
+
159
+ # Configura o semáforo para limitar concorrência
160
+ self.sem = asyncio.Semaphore(self.concurrency)
161
+
162
+ # Se necessário, verifica o robots.txt
163
+ if self.respect_robots:
164
+ await self._check_robots_txt()
165
+
166
+ async def _check_robots_txt(self) -> None:
167
+ """Verifica o arquivo robots.txt e atualiza as regras de acesso."""
168
+ if not self.session:
169
+ return
170
+
171
+ robots_url = f"{self.parsed_base_url.scheme}://{self.parsed_base_url.netloc}/robots.txt"
172
+ try:
173
+ response = await self.session.get(robots_url)
174
+ if response.status_code == 200:
175
+ # Aqui você pode implementar um parser de robots.txt mais sofisticado
176
+ self.robots_txt = {"content": response.text}
177
+ logger.info("robots_txt_found", url=robots_url)
178
+ except Exception as e:
179
+ logger.warning("robots_txt_error", url=robots_url, error=str(e))
180
+
181
+ async def _random_delay(self) -> None:
182
+ """Aguarda um tempo aleatório entre requisições para evitar bloqueios."""
183
+ if self.request_delay:
184
+ min_delay, max_delay = self.request_delay
185
+ delay = random.uniform(min_delay, max_delay)
186
+ elapsed = time.time() - self.last_request_time
187
+ if elapsed < delay:
188
+ await asyncio.sleep(delay - elapsed)
189
+ self.last_request_time = time.time()
190
+
66
191
  async def crawl(self) -> Dict[str, CrawlPage]:
192
+ """Inicia o processo de rastreamento do site.
193
+
194
+ Returns:
195
+ Dict[str, CrawlPage]: Dicionário com as páginas encontradas, onde a chave é a URL.
196
+ """
197
+ # Inicializa a sessão HTTP
198
+ if not self.session:
199
+ await self._init_session()
200
+
201
+ # Inicializa a fila de URLs a serem processadas
67
202
  queue: asyncio.Queue = asyncio.Queue()
68
- await queue.put((self.base_url, 0))
203
+ initial_url = f"{self.parsed_base_url.scheme}://{self.parsed_base_url.netloc}"
204
+ await queue.put((initial_url, 0))
69
205
 
70
- async def worker():
206
+ # Função worker para processar URLs em paralelo
207
+ async def worker() -> None:
71
208
  while True:
72
209
  try:
73
210
  url, depth = queue.get_nowait()
74
211
  except asyncio.QueueEmpty:
75
212
  break
213
+
214
+ # Verifica os limites de páginas e profundidade
76
215
  if len(self.results) >= self.max_pages or depth > self.max_depth:
77
216
  continue
217
+
218
+ # Evita processar a mesma URL múltiplas vezes
78
219
  if url in self.visited:
79
220
  continue
80
- self.visited.add(url)
221
+
222
+ # Aguarda um tempo aleatório entre requisições
223
+ await self._random_delay()
224
+
225
+ # Processa a URL
81
226
  await self._fetch(url, depth, queue)
227
+
228
+ # Atualiza o contador de páginas processadas
229
+ queue.task_done()
82
230
 
231
+ # Inicia os workers
83
232
  workers = [asyncio.create_task(worker()) for _ in range(self.concurrency)]
84
233
  await asyncio.gather(*workers)
85
234
  return self.results
86
235
 
236
+ def _parse_url(self, url: str) -> httpx.URL:
237
+ """Parseia uma URL e retorna um objeto URL do httpx."""
238
+ try:
239
+ return httpx.URL(url)
240
+ except Exception as e:
241
+ logger.error("url_parse_error", url=url, error=str(e))
242
+ raise ValueError(f"URL inválida: {url}") from e
243
+
244
+ def _get_base_domain(self, hostname: str) -> str:
245
+ """Extrai o domínio base de um hostname."""
246
+ if not hostname:
247
+ return ""
248
+ parts = hostname.split(".")
249
+ if len(parts) > 2:
250
+ return ".".join(parts[-2:])
251
+ return hostname
252
+
253
+ def _is_same_domain(self, url: str) -> bool:
254
+ """Verifica se uma URL pertence ao mesmo domínio do alvo."""
255
+ try:
256
+ parsed = self._parse_url(url)
257
+ if not parsed.host:
258
+ return False
259
+
260
+ # Verifica se o domínio é o mesmo ou um subdomínio
261
+ if self.follow_subdomains:
262
+ return parsed.host.endswith(self.base_domain) or f".{parsed.host}".endswith(f".{self.base_domain}")
263
+ return parsed.host == self.parsed_base_url.host
264
+ except Exception:
265
+ return False
266
+
267
+ def _normalize_url(self, url: str, base_url: Optional[str] = None) -> str:
268
+ """Normaliza uma URL, resolvendo URLs relativas e removendo fragmentos."""
269
+ try:
270
+ if not url:
271
+ return ""
272
+
273
+ # Remove fragmentos e espaços em branco
274
+ url = url.split("#")[0].strip()
275
+ if not url:
276
+ return ""
277
+
278
+ # Se for uma URL relativa, resolve em relação à base_url
279
+ if base_url and not url.startswith(('http://', 'https://')):
280
+ base = self._parse_url(base_url)
281
+ url = str(base.join(url))
282
+
283
+ # Parseia a URL para normalização
284
+ parsed = self._parse_url(url)
285
+
286
+ # Remove parâmetros de rastreamento comuns
287
+ if parsed.query:
288
+ query_params = []
289
+ for param in parsed.query.decode().split('&'):
290
+ if '=' in param and any(t in param.lower() for t in ['utm_', 'ref=', 'source=', 'fbclid=', 'gclid=']):
291
+ continue
292
+ query_params.append(param)
293
+
294
+ # Reconstrói a URL sem os parâmetros de rastreamento
295
+ if query_params:
296
+ parsed = parsed.copy_with(query='&'.join(query_params))
297
+ else:
298
+ parsed = parsed.copy_with(query=None)
299
+
300
+ # Remove barras finais desnecessárias
301
+ path = parsed.path.decode()
302
+ if path.endswith('/'):
303
+ path = path.rstrip('/') or '/'
304
+ parsed = parsed.copy_with(path=path)
305
+
306
+ return str(parsed)
307
+
308
+ except Exception as e:
309
+ logger.warning("url_normalize_error", url=url, error=str(e))
310
+ return url
311
+
312
+ def _build_headers(self, referer: Optional[str] = None) -> Dict[str, str]:
313
+ """Constrói os headers para a requisição HTTP."""
314
+ headers = DEFAULT_HEADERS.copy()
315
+
316
+ # Rotaciona o User-Agent
317
+ headers["User-Agent"] = random.choice(USER_AGENTS)
318
+
319
+ # Adiciona o referer se fornecido
320
+ if referer:
321
+ headers["Referer"] = referer
322
+ else:
323
+ headers["Referer"] = random.choice(REFERRERS)
324
+
325
+ return headers
326
+
327
+ async def _stealth_delay(self) -> None:
328
+ """Aplica um atraso aleatório para evitar detecção."""
329
+ if self.stealth and hasattr(self.stealth, 'get_delay'):
330
+ delay = self.stealth.get_delay()
331
+ if delay > 0:
332
+ await asyncio.sleep(delay)
333
+
87
334
  async def _fetch(self, url: str, depth: int, queue: asyncio.Queue) -> None:
88
- async with self.sem:
335
+ """
336
+ Faz o fetch de uma URL e processa os links encontrados.
337
+
338
+ Args:
339
+ url: URL a ser acessada
340
+ depth: Profundidade atual do rastreamento
341
+ queue: Fila de URLs para processamento
342
+ """
343
+ if not self.session:
344
+ logger.error("session_not_initialized")
345
+ return
346
+
347
+ # Marca a URL como visitada
348
+ self.visited.add(url)
349
+
350
+ try:
351
+ # Aplica atraso de stealth, se necessário
352
+ await self._stealth_delay()
353
+
354
+ # Prepara os headers para a requisição
355
+ headers = self._build_headers()
356
+
357
+ # Tenta fazer a requisição com tratamento de erros
89
358
  try:
90
- await self._stealth_delay()
91
- response = await self.session.get(url, headers=self._build_headers())
92
- except Exception as exc:
93
- logger.debug("crawler.fetch.error", url=url, error=str(exc))
359
+ response = await self.session.get(
360
+ url,
361
+ headers=headers,
362
+ follow_redirects=True,
363
+ timeout=self.timeout
364
+ )
365
+
366
+ # Registra o tempo da última requisição
367
+ self.last_request_time = time.time()
368
+
369
+ except httpx.HTTPStatusError as e:
370
+ logger.warning("http_status_error", url=url, status_code=e.response.status_code)
371
+ self.results[url] = CrawlPage(
372
+ url=url,
373
+ status=e.response.status_code,
374
+ error=f"HTTP Error: {e.response.status_code}"
375
+ )
94
376
  return
95
-
96
- page = CrawlPage(url=url, status=response.status_code)
377
+
378
+ except httpx.RequestError as e:
379
+ logger.warning("request_error", url=url, error=str(e))
380
+ self.results[url] = CrawlPage(
381
+ url=url,
382
+ status=0,
383
+ error=f"Request Error: {str(e)}"
384
+ )
385
+ return
386
+
387
+ except Exception as e:
388
+ logger.error("unexpected_error", url=url, error=str(e))
389
+ self.results[url] = CrawlPage(
390
+ url=url,
391
+ status=0,
392
+ error=f"Unexpected Error: {str(e)}"
393
+ )
394
+ return
395
+
396
+ # Processa a resposta
397
+ await self._process_response(url, response, depth, queue)
398
+
399
+ except Exception as e:
400
+ logger.error("fetch_error", url=url, error=str(e))
401
+ self.results[url] = CrawlPage(
402
+ url=url,
403
+ status=0,
404
+ error=f"Processing Error: {str(e)}"
405
+ )
406
+
407
+ async def _process_response(self, url: str, response: httpx.Response, depth: int, queue: asyncio.Queue) -> None:
408
+ """
409
+ Processa a resposta HTTP e extrai links para continuar o rastreamento.
410
+
411
+ Args:
412
+ url: URL que foi acessada
413
+ response: Resposta HTTP
414
+ depth: Profundidade atual do rastreamento
415
+ queue: Fila de URLs para processamento
416
+ """
417
+ # Cria o objeto da página com os dados básicos
418
+ page = CrawlPage(
419
+ url=url,
420
+ status=response.status_code,
421
+ redirect_chain=[(str(r.url), r.status_code) for r in response.history]
422
+ )
423
+
424
+ # Se não for uma resposta de sucesso ou não for HTML, retorna
97
425
  if response.status_code >= 400 or not response.headers.get("content-type", "").startswith("text"):
98
426
  self.results[url] = page
99
427
  return
100
-
101
- parser = HTMLParser(response.text)
102
- title = parser.css_first("title")
103
- page.title = title.text(strip=True) if title else None
104
-
105
- # Forms
428
+
429
+ try:
430
+ # Parseia o HTML
431
+ parser = HTMLParser(response.text)
432
+
433
+ # Extrai o título da página
434
+ title = parser.css_first("title")
435
+ if title and hasattr(title, 'text') and callable(title.text):
436
+ page.title = title.text(strip=True)
437
+
438
+ # Extrai os links da página
439
+ await self._extract_links(parser, url, depth, queue)
440
+
441
+ # Extrai os formulários da página
442
+ self._extract_forms(parser, page)
443
+
444
+ # Adiciona a página aos resultados
445
+ self.results[url] = page
446
+
447
+ except Exception as e:
448
+ logger.error("process_response_error", url=url, error=str(e))
449
+ page.error = f"Error processing response: {str(e)}"
450
+ self.results[url] = page
451
+
452
+ async def _extract_links(self, parser: HTMLParser, base_url: str, depth: int, queue: asyncio.Queue) -> None:
453
+ """Extrai links do HTML e os adiciona à fila de processamento."""
454
+ for link in parser.css("a[href]"):
455
+ try:
456
+ href = link.attributes.get("href", "").strip()
457
+ if not href or href.startswith("#") or href.startswith("javascript:"):
458
+ continue
459
+
460
+ # Normaliza a URL
461
+ url = self._normalize_url(href, base_url)
462
+ if not url:
463
+ continue
464
+
465
+ # Verifica se a URL pertence ao mesmo domínio
466
+ if not self._is_same_domain(url):
467
+ continue
468
+
469
+ # Adiciona à fila se ainda não foi visitada
470
+ if url not in self.visited and url not in self.results:
471
+ queue.put_nowait((url, depth + 1))
472
+
473
+ except Exception as e:
474
+ logger.warning("link_extraction_error", href=href, error=str(e))
475
+
476
+ def _extract_forms(self, parser: HTMLParser, page: CrawlPage) -> None:
477
+ """Extrai formulários do HTML."""
106
478
  for form in parser.css("form"):
107
- action = form.attributes.get("action", url)
108
- method = form.attributes.get("method", "GET").upper()
109
- inputs = [inp.attributes.get("name") for inp in form.css("input") if inp.attributes.get("name")]
110
- page.forms.append(
111
- {
112
- "action": action,
113
- "method": method,
114
- "inputs": ",".join(inputs),
115
- }
116
- )
479
+ try:
480
+ form_data = {"method": form.attributes.get("method", "GET").upper()}
481
+
482
+ # Obtém a ação do formulário
483
+ action = form.attributes.get("action", "").strip()
484
+ if action:
485
+ form_data["action"] = self._normalize_url(action, page.url)
486
+ else:
487
+ form_data["action"] = page.url
488
+
489
+ # Extrai os campos do formulário
490
+ form_data["fields"] = []
491
+ for field in form.css("input, textarea, select"):
492
+ field_data = {
493
+ "name": field.attributes.get("name", ""),
494
+ "type": field.attributes.get("type", "text"),
495
+ "value": field.attributes.get("value", ""),
496
+ "required": "required" in field.attributes
497
+ }
498
+ form_data["fields"].append(field_data)
499
+
500
+ page.forms.append(form_data)
501
+
502
+ except Exception as e:
503
+ logger.warning("form_extraction_error", error=str(e))
504
+
505
+ async def close(self) -> None:
506
+ """Fecha a sessão HTTP."""
507
+ if self.session:
508
+ await self.session.aclose()
509
+ self.session = None
117
510
 
118
- # Links
119
- links: Set[str] = set()
120
- for anchor in parser.css("a"):
121
- href = anchor.attributes.get("href")
122
- if not href:
123
- continue
124
- href = href.strip()
125
- if href.startswith("javascript:") or href.startswith("mailto:"):
126
- continue
127
- absolute = httpx.URL(href, base=httpx.URL(url)).human_repr()
128
- if not self._should_follow(absolute):
129
- continue
130
- links.add(absolute)
131
- if absolute not in self.visited and len(self.results) < self.max_pages:
132
- await queue.put((absolute, depth + 1))
133
- page.links = sorted(links)
134
- self.results[url] = page
135
-
136
- def _should_follow(self, url: str) -> bool:
137
- parsed = httpx.URL(url)
138
- if parsed.scheme not in {"http", "https"}:
139
- return False
140
- if not self.follow_subdomains and parsed.host != self._host:
141
- return False
142
- if not parsed.host.endswith(self._host):
143
- return False
144
- return True
145
-
146
- def _build_headers(self) -> Dict[str, str]:
147
- headers: Dict[str, str] = {"User-Agent": self.user_agent, "Accept": "*/*"}
148
- if self.stealth:
149
- stealth_headers = self.stealth.get_random_headers()
150
- headers.update(stealth_headers)
151
- headers.setdefault("User-Agent", stealth_headers.get("User-Agent", self.user_agent))
152
- return headers
511
+ async def __aenter__(self):
512
+ await self._init_session()
513
+ return self
153
514
 
154
- async def _stealth_delay(self) -> None:
155
- if not self.stealth:
156
- return
157
- config = getattr(self.stealth, "config", None)
158
- if not config or not getattr(config, "timing_randomization", False):
159
- return
160
- await asyncio.sleep(random.uniform(0.05, 0.2) * max(1, self.stealth.level))
515
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
516
+ await self.close()
161
517
 
162
518
 
519
+ # Para compatibilidade com código existente
163
520
  __all__ = ["WebCrawler", "CrawlPage"]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: moriarty-project
3
- Version: 0.1.23
3
+ Version: 0.1.24
4
4
  Summary: Client-side OSINT toolkit with forensic-grade evidence handling.
5
5
  Project-URL: Homepage, https://github.com/DonatoReis/moriarty
6
6
  Project-URL: Documentation, https://github.com/DonatoReis/moriarty#readme
@@ -12,44 +12,57 @@ Classifier: License :: OSI Approved :: MIT License
12
12
  Classifier: Programming Language :: Python
13
13
  Classifier: Programming Language :: Python :: 3
14
14
  Classifier: Programming Language :: Python :: 3 :: Only
15
+ Classifier: Programming Language :: Python :: 3.11
16
+ Classifier: Programming Language :: Python :: 3.12
15
17
  Classifier: Programming Language :: Python :: 3.13
16
18
  Classifier: Topic :: Security
17
19
  Classifier: Topic :: Utilities
18
- Requires-Python: >=3.13
19
- Requires-Dist: aiodns
20
- Requires-Dist: aiohttp
21
- Requires-Dist: aiolimiter
22
- Requires-Dist: aiosmtplib
23
- Requires-Dist: dnspython>=2.0.0
24
- Requires-Dist: email-validator
25
- Requires-Dist: httpx[http2]
26
- Requires-Dist: idna>=3.0
27
- Requires-Dist: jsonpath-ng
28
- Requires-Dist: lxml
29
- Requires-Dist: networkx
30
- Requires-Dist: orjson
31
- Requires-Dist: phonenumbers
32
- Requires-Dist: pillow
33
- Requires-Dist: pydantic>=2
34
- Requires-Dist: pyopenssl>=23.0.0
35
- Requires-Dist: pyyaml>=6
36
- Requires-Dist: rapidfuzz
37
- Requires-Dist: requests
38
- Requires-Dist: rich
39
- Requires-Dist: scapy
40
- Requires-Dist: selectolax
41
- Requires-Dist: structlog
42
- Requires-Dist: typer[all]
43
- Requires-Dist: uvloop; platform_system != 'Windows'
20
+ Requires-Python: >=3.11
21
+ Requires-Dist: aiodns>=3.0.0
22
+ Requires-Dist: aiohttp>=3.9.0
23
+ Requires-Dist: aiolimiter>=1.1.0
24
+ Requires-Dist: aiosmtplib>=2.0.0
25
+ Requires-Dist: colorama>=0.4.6
26
+ Requires-Dist: dnspython>=2.5.0
27
+ Requires-Dist: email-validator>=2.0.0
28
+ Requires-Dist: httpx[http2]>=0.27.0
29
+ Requires-Dist: idna>=3.6
30
+ Requires-Dist: jsonpath-ng>=1.6.0
31
+ Requires-Dist: lxml>=5.3.0
32
+ Requires-Dist: netifaces>=0.11.0
33
+ Requires-Dist: networkx>=3.2.0
34
+ Requires-Dist: orjson>=3.9.0
35
+ Requires-Dist: phonenumbers>=8.13.0
36
+ Requires-Dist: pillow>=10.0.0
37
+ Requires-Dist: psutil>=5.9.0
38
+ Requires-Dist: pycryptodomex>=3.23.0
39
+ Requires-Dist: pydantic>=2.7.0
40
+ Requires-Dist: pyopenssl>=25.0.0
41
+ Requires-Dist: python-nmap>=0.7.1
42
+ Requires-Dist: pyyaml>=6.0
43
+ Requires-Dist: rapidfuzz>=3.0.0
44
+ Requires-Dist: requests>=2.32.0
45
+ Requires-Dist: rich>=13.7.0
46
+ Requires-Dist: scapy>=2.5.0
47
+ Requires-Dist: selectolax>=0.3.20
48
+ Requires-Dist: structlog>=24.1.0
49
+ Requires-Dist: tqdm>=4.67.0
50
+ Requires-Dist: typer[all]>=0.12.0
51
+ Requires-Dist: uvloop>=0.19.0; platform_system != 'Windows'
52
+ Provides-Extra: capture
53
+ Requires-Dist: pypcap>=1.2.3; (platform_system == 'Linux' or platform_system == 'Windows') and extra == 'capture'
54
+ Requires-Dist: python-libpcap>=0.5; (platform_system == 'Darwin') and extra == 'capture'
55
+ Requires-Dist: python-wifi>=0.6.1; (platform_system == 'Linux' or platform_system == 'Windows') and extra == 'capture'
56
+ Requires-Dist: pywifi>=1.1.12; (platform_system == 'Linux' or platform_system == 'Windows') and extra == 'capture'
44
57
  Provides-Extra: dev
45
- Requires-Dist: cyclonedx-bom; extra == 'dev'
58
+ Requires-Dist: cyclonedx-bom>=4.4.0; extra == 'dev'
46
59
  Requires-Dist: deptry>=0.23.0; extra == 'dev'
47
- Requires-Dist: hypothesis; extra == 'dev'
48
- Requires-Dist: mypy; extra == 'dev'
49
- Requires-Dist: pip-audit; extra == 'dev'
50
- Requires-Dist: pytest; extra == 'dev'
51
- Requires-Dist: pytest-xdist; extra == 'dev'
52
- Requires-Dist: ruff; extra == 'dev'
60
+ Requires-Dist: hypothesis>=6.98.0; extra == 'dev'
61
+ Requires-Dist: mypy>=1.10.0; extra == 'dev'
62
+ Requires-Dist: pip-audit>=2.7.0; extra == 'dev'
63
+ Requires-Dist: pytest-xdist>=3.5.0; extra == 'dev'
64
+ Requires-Dist: pytest>=8.0.0; extra == 'dev'
65
+ Requires-Dist: ruff>=0.5.0; extra == 'dev'
53
66
  Provides-Extra: headless
54
67
  Requires-Dist: playwright>=1.40.0; extra == 'headless'
55
68
  Provides-Extra: http3
@@ -85,7 +98,7 @@ Description-Content-Type: text/markdown
85
98
  <!-- Badges -->
86
99
  <p align="center">
87
100
  <a href="https://pypi.org/project/moriarty-project/">
88
- <img src="https://img.shields.io/pypi/v/moriarty-project?color=blue&label=PyPI" alt="PyPI version">
101
+ <img src="https://img.shields.io/badge/version-0.1.24-blue" alt="Version 0.1.24">
89
102
  </a>
90
103
  <a href="https://www.python.org/downloads/">
91
104
  <img src="https://img.shields.io/pypi/pyversions/moriarty-project?color=blue" alt="Python Versions">
@@ -139,7 +152,7 @@ Description-Content-Type: text/markdown
139
152
  pipx install moriarty-project
140
153
 
141
154
  # OU para instalar uma versão específica
142
- # pipx install moriarty-project==0.1.23
155
+ # pipx install moriarty-project==0.1.24
143
156
 
144
157
  # Verificar a instalação
145
158
  moriarty --help
@@ -1,4 +1,4 @@
1
- moriarty/__init__.py,sha256=xHQKlSMGcl0-ubP4s1a46VztIjLcTbQOpNHv-F_3yUk,85
1
+ moriarty/__init__.py,sha256=p4Djdhmv6FeCw-z_OFHcm4SkmafU1L00Iny0SkEY3Kk,85
2
2
  moriarty/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
3
  moriarty/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  moriarty/assets/modules/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -328,10 +328,10 @@ moriarty/assets/templates/vulnerabilities/xss-reflected.yaml,sha256=iG0WphhygN2p
328
328
  moriarty/assets/templates/vulnerabilities/xxe-basic.yaml,sha256=7bG1KT5jBm8SAQp-pbjxknFuW5fIkF9yrufsq2OghRk,565
329
329
  moriarty/assets/wordlists/subdomains-1000.txt,sha256=XKDCiJMJ2wnDRngglAiv7iuDnwlUIr_-LnjlNwWp4sc,8032
330
330
  moriarty/cli/__init__.py,sha256=PQ8kA28EJ9h8mIK_VJtH1PtJ3xfVsF3d7IsmpZfl_Bc,54
331
- moriarty/cli/app.py,sha256=uo5SxblJGG340F1cChad4W78ASFLk7k3x_ETtg-0mts,7825
331
+ moriarty/cli/app.py,sha256=Y1KF8Z9I5VUyQg0HE7agy4BsGkoLyPq5JLAeKiIS0fg,7987
332
332
  moriarty/cli/async_utils.py,sha256=jleTd2nyAVsNqJae2kKlo514gKKcH3nBXvfoLubOq6A,393
333
333
  moriarty/cli/dns.py,sha256=QhDwe9uoAmk243XbO9YBYk3YlPmiXYT_vLqUXum9i70,2735
334
- moriarty/cli/domain_cmd.py,sha256=CgpTN0kMKlAvrH72wDPuAodXOhBb0hgR4HBfZYfSTEg,24193
334
+ moriarty/cli/domain_cmd.py,sha256=i-NSsTqmIu0wK9VYD0e2yiBbFY4p9sdbUD1zxvRh2jk,24274
335
335
  moriarty/cli/email.py,sha256=AcXMwtppFHxbjNHagrKXCdEGx-zWpvFFCB4QlCo_STU,14763
336
336
  moriarty/cli/email_investigate.py,sha256=w2OlrwpRAzPNW0YLxmt-eTB3lGJ3a-oQCM4elRFJ7lw,8319
337
337
  moriarty/cli/intelligence.py,sha256=81BnrS8ES7fH2lI34AMB5Yxuw-lbWvu44T2R444b-kE,11852
@@ -340,6 +340,7 @@ moriarty/cli/rdap.py,sha256=OVtYSsx37te0TxvY7K8iBMerPBSByXocm8kLeoUEhOw,3079
340
340
  moriarty/cli/state.py,sha256=5_HXXff9KWiYw8R763NgUAFuCAKHTZZe2zYD9JEvJxw,855
341
341
  moriarty/cli/tls.py,sha256=j4ieW_Vt0QG7AuDHfjUEAOYkvk4GJguB9hY6dYJW3K0,3269
342
342
  moriarty/cli/user.py,sha256=EWtRp4pzY7kOiWE16Yokc71pB4GP-ItguF1E2Lkri3Q,8169
343
+ moriarty/cli/wifippler.py,sha256=sFjtwiN0-wSbBZ2MXE2Kb8kMBmz-BG1bsBcgo4G36SY,4431
343
344
  moriarty/core/cache_backend.py,sha256=tu_szkoRfZOmHh9g4ZTGc3gN-gZ-zTJGVKxlHhtvIHY,7032
344
345
  moriarty/core/config_manager.py,sha256=iXrH4UgMU05U7vsqfyFmwzybQIYH5s9hyjJlS9T9CKM,11751
345
346
  moriarty/correlator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -373,7 +374,7 @@ moriarty/models/relation.py,sha256=e__ewQhptimr9BZ3GdfFX9MxLLnCB5KKBfQxm-YsNj8,6
373
374
  moriarty/models/types.py,sha256=WlNtgcF4P3XYcQLresQjfmogh-JyPC21Bd721Z6fl9I,514
374
375
  moriarty/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
375
376
  moriarty/modules/avatar_hash.py,sha256=5Yub_N6OjUNMbt8bHShmln7i0ySDyNXrOtihWMKRivA,5919
376
- moriarty/modules/directory_fuzzer.py,sha256=xrRst5LIghLJDpH_bzd02uo47kx1br8z7KtTxICGZak,11703
377
+ moriarty/modules/directory_fuzzer.py,sha256=R78fxkV6prKtj7Qq0-THPpXvWGV_OeTwa9dE-j-GY-M,12581
377
378
  moriarty/modules/dns_scan.py,sha256=lR1sGwhk1FXDmn6pCTrtd6Cr33uPllBpm_jqAi3-HxA,1096
378
379
  moriarty/modules/domain_scanner.py,sha256=fsPDogDPnRfrocAlQGN09nUvxD501DFlZMIQ3dm8OlM,23841
379
380
  moriarty/modules/email_check.py,sha256=VBAWvK3cH2Zu7wjDB2xhnqZ0scifp3s1K67CmAoUVuY,3343
@@ -385,7 +386,7 @@ moriarty/modules/orchestrator.py,sha256=uhK4UP69uWUxCK4a_bGk_BI8I-coBM8mO8Ear3pc
385
386
  moriarty/modules/passive_recon.py,sha256=5XUJgyWvITMUf4W76QyNfbRgF1AitQ5xK9MbwzVURxM,16568
386
387
  moriarty/modules/phone_extractor.py,sha256=Bv4VVWPqf67CQfkMDJddggtEU5UUbra2FP5DLik2Gxw,5298
387
388
  moriarty/modules/pipeline_orchestrator.py,sha256=qP2WcvorM_pAjacJ8VuZ-1TQ7lDVLvnyFD9T4PvWq9I,26337
388
- moriarty/modules/port_scanner.py,sha256=fbz7ECgdcQ9OtummUc94xxOHLnKy7BaTDHWt9CNYITU,33302
389
+ moriarty/modules/port_scanner.py,sha256=FNExysXgxHU-z77yYIRL2om7C23WkiF-AMliPjSmQhQ,33432
389
390
  moriarty/modules/rdap.py,sha256=XlrTDalbSxszEwIQFHt-XWON_P9UsLLiR84DGSKAwrk,1859
390
391
  moriarty/modules/rdap_extended.py,sha256=Gef7zyOY5nuS2qn8a9iaXA7Tx3TJHZJBWpqa-BZaq4U,6447
391
392
  moriarty/modules/stealth_mode.py,sha256=9LJWXSgBjDsTZYslHz4zSnzDoC3JFdBcu3oOYN1IPhA,24459
@@ -398,7 +399,7 @@ moriarty/modules/tls_validator.py,sha256=26HoAiTv7YGiry-mya78BDDULVPC4KcOXugn9UN
398
399
  moriarty/modules/vuln_scanner.py,sha256=-sRWOPzmrzUP9Mly471JCbP7iAxSwoho9DVn-6bFC6A,18279
399
400
  moriarty/modules/waf_detector.py,sha256=5biF5OVBHbLAj_5x0ZXzCS-94bQeJNzIcYm7kMDAX0I,20744
400
401
  moriarty/modules/wayback_discovery.py,sha256=sJN9at7Py-ZiUWuwnMU7fHOc_F3WwN1R3Y72qNSmxck,8573
401
- moriarty/modules/web_crawler.py,sha256=hxWbnD2GO_IbIb2FChCxCesgGu4dNhlloVqr5u1MC1k,5890
402
+ moriarty/modules/web_crawler.py,sha256=1pVLMz5vPoYFHWh2GB3ENlM2hCX07CNEXX9rKoNEI_c,20316
402
403
  moriarty/net/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
403
404
  moriarty/net/dns_cache.py,sha256=JwuDYKqmuSD-hl7PMyyQAen57ut-uvIszYrMKT-i8pY,6022
404
405
  moriarty/net/dns_client.py,sha256=iem7FekC79ruwxWzG6eFkicYJi-urkRV0kNvj9uakM0,6591
@@ -412,7 +413,7 @@ moriarty/tests/test_email_service.py,sha256=mWqimjQRlljZNBuNePvSzhfq5FZ4mljrILGW
412
413
  moriarty/tests/test_models.py,sha256=etklIISEUts3banaSRDSjhv-g6kd4wxucchCmlJkx6Y,1282
413
414
  moriarty/tests/test_orchestrator.py,sha256=Do3M1qnbqPf_1pR3v89FXxhiwfYPZfXRvcfl05isQvs,856
414
415
  moriarty/tests/test_tls_client.py,sha256=bQ46yXlIYNZwPTd8WGs6eUynHj56hVosxBycSU1gJe4,461
415
- moriarty_project-0.1.23.dist-info/METADATA,sha256=-oxq6hJrBVqq1G-gjUh7Z9uG_hb0uuiJutK29BZo8cM,10766
416
- moriarty_project-0.1.23.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
417
- moriarty_project-0.1.23.dist-info/entry_points.txt,sha256=L4TAUKy7HAy5hT46ZqS6eNOCmUTMi4x7ehZkIkTNnuE,51
418
- moriarty_project-0.1.23.dist-info/RECORD,,
416
+ moriarty_project-0.1.24.dist-info/METADATA,sha256=y2VGfhCSIvtkV5IfEcieRpC4sMLhSRRlEJjsasUJ_gw,11709
417
+ moriarty_project-0.1.24.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
418
+ moriarty_project-0.1.24.dist-info/entry_points.txt,sha256=L4TAUKy7HAy5hT46ZqS6eNOCmUTMi4x7ehZkIkTNnuE,51
419
+ moriarty_project-0.1.24.dist-info/RECORD,,