crawlo 1.1.3__py3-none-any.whl → 1.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (115)
  1. crawlo/__init__.py +28 -1
  2. crawlo/__version__.py +1 -1
  3. crawlo/cleaners/__init__.py +61 -0
  4. crawlo/cleaners/data_formatter.py +226 -0
  5. crawlo/cleaners/encoding_converter.py +126 -0
  6. crawlo/cleaners/text_cleaner.py +233 -0
  7. crawlo/commands/startproject.py +117 -13
  8. crawlo/config.py +30 -0
  9. crawlo/config_validator.py +253 -0
  10. crawlo/core/engine.py +185 -11
  11. crawlo/core/scheduler.py +49 -78
  12. crawlo/crawler.py +6 -6
  13. crawlo/downloader/__init__.py +24 -0
  14. crawlo/downloader/aiohttp_downloader.py +8 -0
  15. crawlo/downloader/cffi_downloader.py +5 -0
  16. crawlo/downloader/hybrid_downloader.py +214 -0
  17. crawlo/downloader/playwright_downloader.py +403 -0
  18. crawlo/downloader/selenium_downloader.py +473 -0
  19. crawlo/extension/__init__.py +17 -10
  20. crawlo/extension/health_check.py +142 -0
  21. crawlo/extension/log_interval.py +27 -18
  22. crawlo/extension/log_stats.py +62 -24
  23. crawlo/extension/logging_extension.py +18 -9
  24. crawlo/extension/memory_monitor.py +105 -0
  25. crawlo/extension/performance_profiler.py +134 -0
  26. crawlo/extension/request_recorder.py +108 -0
  27. crawlo/filters/aioredis_filter.py +50 -12
  28. crawlo/middleware/proxy.py +26 -2
  29. crawlo/mode_manager.py +24 -19
  30. crawlo/network/request.py +30 -3
  31. crawlo/network/response.py +114 -25
  32. crawlo/pipelines/mongo_pipeline.py +81 -66
  33. crawlo/pipelines/mysql_pipeline.py +165 -43
  34. crawlo/pipelines/redis_dedup_pipeline.py +7 -3
  35. crawlo/queue/queue_manager.py +15 -2
  36. crawlo/queue/redis_priority_queue.py +144 -76
  37. crawlo/settings/default_settings.py +93 -121
  38. crawlo/subscriber.py +62 -37
  39. crawlo/templates/project/items.py.tmpl +1 -1
  40. crawlo/templates/project/middlewares.py.tmpl +73 -49
  41. crawlo/templates/project/pipelines.py.tmpl +51 -295
  42. crawlo/templates/project/settings.py.tmpl +93 -17
  43. crawlo/templates/project/settings_distributed.py.tmpl +120 -0
  44. crawlo/templates/project/settings_gentle.py.tmpl +95 -0
  45. crawlo/templates/project/settings_high_performance.py.tmpl +152 -0
  46. crawlo/templates/project/settings_simple.py.tmpl +69 -0
  47. crawlo/templates/spider/spider.py.tmpl +2 -38
  48. crawlo/tools/__init__.py +183 -0
  49. crawlo/tools/anti_crawler.py +269 -0
  50. crawlo/tools/authenticated_proxy.py +241 -0
  51. crawlo/tools/data_validator.py +181 -0
  52. crawlo/tools/date_tools.py +36 -0
  53. crawlo/tools/distributed_coordinator.py +387 -0
  54. crawlo/tools/retry_mechanism.py +221 -0
  55. crawlo/tools/scenario_adapter.py +263 -0
  56. crawlo/utils/__init__.py +29 -1
  57. crawlo/utils/batch_processor.py +261 -0
  58. crawlo/utils/date_tools.py +58 -1
  59. crawlo/utils/enhanced_error_handler.py +360 -0
  60. crawlo/utils/env_config.py +106 -0
  61. crawlo/utils/error_handler.py +126 -0
  62. crawlo/utils/performance_monitor.py +285 -0
  63. crawlo/utils/redis_connection_pool.py +335 -0
  64. crawlo/utils/redis_key_validator.py +200 -0
  65. crawlo-1.1.5.dist-info/METADATA +401 -0
  66. crawlo-1.1.5.dist-info/RECORD +185 -0
  67. tests/advanced_tools_example.py +276 -0
  68. tests/authenticated_proxy_example.py +237 -0
  69. tests/cleaners_example.py +161 -0
  70. tests/config_validation_demo.py +103 -0
  71. tests/date_tools_example.py +181 -0
  72. tests/dynamic_loading_example.py +524 -0
  73. tests/dynamic_loading_test.py +105 -0
  74. tests/env_config_example.py +134 -0
  75. tests/error_handling_example.py +172 -0
  76. tests/redis_key_validation_demo.py +131 -0
  77. tests/response_improvements_example.py +145 -0
  78. tests/test_advanced_tools.py +149 -0
  79. tests/test_all_redis_key_configs.py +146 -0
  80. tests/test_authenticated_proxy.py +142 -0
  81. tests/test_cleaners.py +55 -0
  82. tests/test_comprehensive.py +147 -0
  83. tests/test_config_validator.py +194 -0
  84. tests/test_date_tools.py +124 -0
  85. tests/test_dynamic_downloaders_proxy.py +125 -0
  86. tests/test_dynamic_proxy.py +93 -0
  87. tests/test_dynamic_proxy_config.py +147 -0
  88. tests/test_dynamic_proxy_real.py +110 -0
  89. tests/test_edge_cases.py +304 -0
  90. tests/test_enhanced_error_handler.py +271 -0
  91. tests/test_env_config.py +122 -0
  92. tests/test_error_handler_compatibility.py +113 -0
  93. tests/test_framework_env_usage.py +104 -0
  94. tests/test_integration.py +357 -0
  95. tests/test_item_dedup_redis_key.py +123 -0
  96. tests/test_parsel.py +30 -0
  97. tests/test_performance.py +328 -0
  98. tests/test_queue_manager_redis_key.py +177 -0
  99. tests/test_redis_connection_pool.py +295 -0
  100. tests/test_redis_key_naming.py +182 -0
  101. tests/test_redis_key_validator.py +124 -0
  102. tests/test_response_improvements.py +153 -0
  103. tests/test_simple_response.py +62 -0
  104. tests/test_telecom_spider_redis_key.py +206 -0
  105. tests/test_template_content.py +88 -0
  106. tests/test_template_redis_key.py +135 -0
  107. tests/test_tools.py +154 -0
  108. tests/tools_example.py +258 -0
  109. crawlo/core/enhanced_engine.py +0 -190
  110. crawlo-1.1.3.dist-info/METADATA +0 -635
  111. crawlo-1.1.3.dist-info/RECORD +0 -113
  112. {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/WHEEL +0 -0
  113. {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/entry_points.txt +0 -0
  114. {crawlo-1.1.3.dist-info → crawlo-1.1.5.dist-info}/top_level.txt +0 -0
  115. {examples → tests}/controlled_spider_example.py +0 -0

crawlo/extension/log_interval.py

@@ -1,6 +1,7 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
 import asyncio
+from typing import Any, Optional

 from crawlo.utils.log import get_logger
 from crawlo.event import spider_opened, spider_closed
@@ -8,12 +9,12 @@ from crawlo.event import spider_opened, spider_closed

 class LogIntervalExtension(object):

-    def __init__(self, crawler):
-        self.task = None
+    def __init__(self, crawler: Any):
+        self.task: Optional[asyncio.Task] = None
         self.stats = crawler.stats
         self.item_count = 0
         self.response_count = 0
-        self.seconds = crawler.settings.get('INTERVAL')
+        self.seconds = crawler.settings.get('INTERVAL', 60)  # default: 60 seconds
         self.interval = int(self.seconds / 60) if self.seconds % 60 == 0 else self.seconds
         self.interval = "" if self.interval == 1 else self.interval
         self.unit = 'min' if self.seconds % 60 == 0 else 's'
@@ -21,29 +22,37 @@ class LogIntervalExtension(object):
         self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))

     @classmethod
-    def create_instance(cls, crawler):
+    def create_instance(cls, crawler: Any) -> 'LogIntervalExtension':
         o = cls(crawler)
         crawler.subscriber.subscribe(o.spider_opened, event=spider_opened)
         crawler.subscriber.subscribe(o.spider_closed, event=spider_closed)
         return o

-    async def spider_opened(self):
+    async def spider_opened(self) -> None:
         self.task = asyncio.create_task(self.interval_log())
-        await self.task

-    async def spider_closed(self):
+    async def spider_closed(self) -> None:
         if self.task:
             self.task.cancel()
+            try:
+                await self.task
+            except asyncio.CancelledError:
+                pass
+            self.task = None

-    async def interval_log(self):
+    async def interval_log(self) -> None:
         while True:
-            last_item_count = self.stats.get_value('item_successful_count', default=0)
-            last_response_count = self.stats.get_value('response_received_count', default=0)
-            item_rate = last_item_count - self.item_count
-            response_rate = last_response_count - self.response_count
-            self.item_count, self.response_count = last_item_count, last_response_count
-            self.logger.info(
-                f'Crawled {last_response_count} pages (at {response_rate} pages/{self.interval}{self.unit}),'
-                f' Got {last_item_count} items (at {item_rate} items/{self.interval}{self.unit}).'
-            )
-            await asyncio.sleep(self.seconds)
+            try:
+                last_item_count = self.stats.get_value('item_successful_count', default=0)
+                last_response_count = self.stats.get_value('response_received_count', default=0)
+                item_rate = last_item_count - self.item_count
+                response_rate = last_response_count - self.response_count
+                self.item_count, self.response_count = last_item_count, last_response_count
+                self.logger.info(
+                    f'Crawled {last_response_count} pages (at {response_rate} pages/{self.interval}{self.unit}),'
+                    f' Got {last_item_count} items (at {item_rate} items/{self.interval}{self.unit}).'
+                )
+                await asyncio.sleep(self.seconds)
+            except Exception as e:
+                self.logger.error(f"Error in interval logging: {e}")
+                await asyncio.sleep(self.seconds)  # keep running even if an error occurs
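
The hunk above makes the INTERVAL setting optional (falling back to 60 seconds) and keeps the reporting loop alive after errors. A minimal settings sketch for the keys this extension reads, assuming they live in a generated project's settings module like the other keys in this release; the values are illustrative:

    INTERVAL = 120      # log throughput every 120 s; divisible by 60, so rates are reported per "2min"
    LOG_LEVEL = 'INFO'  # level handed to get_logger() by the extension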

crawlo/extension/log_stats.py

@@ -1,44 +1,82 @@
 #!/usr/bin/python
 # -*- coding:UTF-8 -*-
+from typing import Any
+
 from crawlo import event
 from crawlo.utils.date_tools import now, time_diff


 class LogStats(object):

-    def __init__(self, stats):
+    def __init__(self, stats: Any):
         self._stats = stats

     @classmethod
-    def create_instance(cls, crawler):
+    def create_instance(cls, crawler: Any) -> 'LogStats':
         o = cls(crawler.stats)
-        crawler.subscriber.subscribe(o.spider_opened, event=event.spider_opened)
-        crawler.subscriber.subscribe(o.spider_closed, event=event.spider_closed)
-        crawler.subscriber.subscribe(o.item_successful, event=event.item_successful)
-        crawler.subscriber.subscribe(o.item_discard, event=event.item_discard)
-        crawler.subscriber.subscribe(o.response_received, event=event.response_received)
-        crawler.subscriber.subscribe(o.request_scheduled, event=event.request_scheduled)
+        # Subscribe to all required events
+        event_subscriptions = [
+            (o.spider_opened, event.spider_opened),
+            (o.spider_closed, event.spider_closed),
+            (o.item_successful, event.item_successful),
+            (o.item_discard, event.item_discard),
+            (o.response_received, event.response_received),
+            (o.request_scheduled, event.request_scheduled),
+        ]
+
+        for handler, evt in event_subscriptions:
+            try:
+                crawler.subscriber.subscribe(handler, event=evt)
+            except Exception as e:
+                # Get a logger and record the error
+                from crawlo.utils.log import get_logger
+                logger = get_logger(cls.__name__)
+                logger.error(f"Failed to subscribe to event {evt}: {e}")

         return o

-    async def spider_opened(self):
-        self._stats['start_time'] = now(fmt='%Y-%m-%d %H:%M:%S')
+    async def spider_opened(self) -> None:
+        try:
+            self._stats['start_time'] = now(fmt='%Y-%m-%d %H:%M:%S')
+        except Exception as e:
+            # Fail silently so stats never interrupt the crawl
+            pass

-    async def spider_closed(self):
-        self._stats['end_time'] = now(fmt='%Y-%m-%d %H:%M:%S')
-        self._stats['cost_time(s)'] = time_diff(start=self._stats['start_time'], end=self._stats['end_time'])
+    async def spider_closed(self) -> None:
+        try:
+            self._stats['end_time'] = now(fmt='%Y-%m-%d %H:%M:%S')
+            self._stats['cost_time(s)'] = time_diff(start=self._stats['start_time'], end=self._stats['end_time'])
+        except Exception as e:
+            # Fail silently so stats never interrupt the crawl
+            pass

-    async def item_successful(self, _item, _spider):
-        self._stats.inc_value('item_successful_count')
+    async def item_successful(self, _item: Any, _spider: Any) -> None:
+        try:
+            self._stats.inc_value('item_successful_count')
+        except Exception as e:
+            # Fail silently so stats never interrupt the crawl
+            pass

-    async def item_discard(self, _item, exc, _spider):
-        self._stats.inc_value('item_discard_count')
-        reason = exc.msg
-        if reason:
-            self._stats.inc_value(f"item_discard/{reason}")
+    async def item_discard(self, _item: Any, exc: Any, _spider: Any) -> None:
+        try:
+            self._stats.inc_value('item_discard_count')
+            reason = getattr(exc, 'msg', None)  # safer attribute access
+            if reason:
+                self._stats.inc_value(f"item_discard/{reason}")
+        except Exception as e:
+            # Fail silently so stats never interrupt the crawl
+            pass

-    async def response_received(self, _response, _spider):
-        self._stats.inc_value('response_received_count')
+    async def response_received(self, _response: Any, _spider: Any) -> None:
+        try:
+            self._stats.inc_value('response_received_count')
+        except Exception as e:
+            # Fail silently so stats never interrupt the crawl
+            pass

-    async def request_scheduled(self, _request, _spider):
-        self._stats.inc_value('request_scheduler_count')
+    async def request_scheduled(self, _request: Any, _spider: Any) -> None:
+        try:
+            self._stats.inc_value('request_scheduler_count')
+        except Exception as e:
+            # Fail silently so stats never interrupt the crawl
+            pass
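
LogStats now wraps every counter update in a try/except so a stats failure can never stop the crawl. The counters it maintains can be read back through the same stats API used by LogIntervalExtension above; a hedged sketch (get_value and its default keyword come from that diff, the crawler.stats access point and usage context are assumptions):

    pages = crawler.stats.get_value('response_received_count', default=0)
    items = crawler.stats.get_value('item_successful_count', default=0)
    dropped = crawler.stats.get_value('item_discard_count', default=0)
    print(f'{pages} pages fetched, {items} items stored, {dropped} items discarded')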

crawlo/extension/logging_extension.py

@@ -1,3 +1,4 @@
+from typing import Any
 from crawlo.exceptions import NotConfigured
 from crawlo.utils.log import get_logger
 from crawlo.utils.log import LoggerManager
@@ -9,27 +10,35 @@ class CustomLoggerExtension:
     Follows the same interface convention as ExtensionManager: use create_instance
     """

-    def __init__(self, settings):
+    def __init__(self, settings: Any):
         self.settings = settings
         # Initialise the global logging configuration
         LoggerManager.configure(settings)

     @classmethod
-    def create_instance(cls, crawler, *args, **kwargs):
+    def create_instance(cls, crawler: Any, *args: Any, **kwargs: Any) -> 'CustomLoggerExtension':
         """
         Factory method: compatible with how ExtensionManager creates instances.
         Called by ExtensionManager.
         """
         # Whether the extension is enabled can be controlled via settings
-        if not crawler.settings.get('LOG_FILE') and not crawler.settings.get('LOG_ENABLE_CUSTOM'):
+        log_file = crawler.settings.get('LOG_FILE')
+        log_enable_custom = crawler.settings.get('LOG_ENABLE_CUSTOM', False)
+
+        # Disable only when no log file is configured and custom logging is not enabled
+        if not log_file and not log_enable_custom:
             raise NotConfigured("CustomLoggerExtension: LOG_FILE not set and LOG_ENABLE_CUSTOM=False")

         return cls(crawler.settings)

-    def spider_opened(self, spider):
+    def spider_opened(self, spider: Any) -> None:
         logger = get_logger(__name__)
-        logger.info(
-            f"CustomLoggerExtension: Logging initialized. "
-            f"LOG_FILE={self.settings.get('LOG_FILE')}, "
-            f"LOG_LEVEL={self.settings.get('LOG_LEVEL')}"
-        )
+        try:
+            logger.info(
+                f"CustomLoggerExtension: Logging initialized. "
+                f"LOG_FILE={self.settings.get('LOG_FILE')}, "
+                f"LOG_LEVEL={self.settings.get('LOG_LEVEL')}"
+            )
+        except Exception as e:
+            # Even if the initialisation message cannot be logged, it must not affect the run
+            pass
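
CustomLoggerExtension now reads LOG_ENABLE_CUSTOM with an explicit False default and only raises NotConfigured when neither key is set. A minimal settings sketch, assuming these keys sit in the project settings module; the file path shown is illustrative:

    LOG_FILE = 'logs/crawlo.log'  # configuring a log file keeps the extension enabled ...
    LOG_ENABLE_CUSTOM = True      # ... or opt in explicitly without a log file
    LOG_LEVEL = 'INFO'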

crawlo/extension/memory_monitor.py (new file)

@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+import asyncio
+import psutil
+from typing import Any, Optional
+
+from crawlo.utils.log import get_logger
+from crawlo.utils.error_handler import ErrorHandler
+from crawlo.event import spider_opened, spider_closed
+
+
+class MemoryMonitorExtension:
+    """
+    Memory monitoring extension.
+    Periodically monitors the crawler process's memory usage and warns when thresholds are exceeded.
+    """
+
+    def __init__(self, crawler: Any):
+        self.task: Optional[asyncio.Task] = None
+        self.process = psutil.Process()
+        self.settings = crawler.settings
+        self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+        self.error_handler = ErrorHandler(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+
+        # Configuration parameters
+        self.interval = self.settings.get_int('MEMORY_MONITOR_INTERVAL', 60)  # check every 60 s by default
+        self.warning_threshold = self.settings.get_float('MEMORY_WARNING_THRESHOLD', 80.0)  # default 80% warning threshold
+        self.critical_threshold = self.settings.get_float('MEMORY_CRITICAL_THRESHOLD', 90.0)  # default 90% critical threshold
+
+    @classmethod
+    def create_instance(cls, crawler: Any) -> 'MemoryMonitorExtension':
+        # Only create the instance when enabled in settings
+        if not crawler.settings.get_bool('MEMORY_MONITOR_ENABLED', False):
+            from crawlo.exceptions import NotConfigured
+            raise NotConfigured("MemoryMonitorExtension: MEMORY_MONITOR_ENABLED is False")
+
+        o = cls(crawler)
+        crawler.subscriber.subscribe(o.spider_opened, event=spider_opened)
+        crawler.subscriber.subscribe(o.spider_closed, event=spider_closed)
+        return o
+
+    async def spider_opened(self) -> None:
+        """Start monitoring when the spider opens."""
+        try:
+            self.task = asyncio.create_task(self._monitor_loop())
+            self.logger.info(
+                f"Memory monitor started. Interval: {self.interval}s, "
+                f"Warning threshold: {self.warning_threshold}%, Critical threshold: {self.critical_threshold}%"
+            )
+        except Exception as e:
+            self.error_handler.handle_error(
+                e,
+                context="Failed to start memory monitor",
+                raise_error=False
+            )
+
+    async def spider_closed(self) -> None:
+        """Stop monitoring when the spider closes."""
+        try:
+            if self.task:
+                self.task.cancel()
+                try:
+                    await self.task
+                except asyncio.CancelledError:
+                    pass
+                self.task = None
+            self.logger.info("Memory monitor stopped.")
+        except Exception as e:
+            self.error_handler.handle_error(
+                e,
+                context="Failed to stop memory monitor",
+                raise_error=False
+            )
+
+    async def _monitor_loop(self) -> None:
+        """Memory monitoring loop."""
+        while True:
+            try:
+                # Collect memory usage info
+                memory_info = self.process.memory_info()
+                memory_percent = self.process.memory_percent()
+
+                # Log current usage
+                self.logger.debug(
+                    f"Memory usage: {memory_percent:.2f}% "
+                    f"(RSS: {memory_info.rss / 1024 / 1024:.2f} MB, "
+                    f"VMS: {memory_info.vms / 1024 / 1024:.2f} MB)"
+                )
+
+                # Check thresholds
+                if memory_percent >= self.critical_threshold:
+                    self.logger.critical(
+                        f"Memory usage critical: {memory_percent:.2f}% "
+                        f"(RSS: {memory_info.rss / 1024 / 1024:.2f} MB)"
+                    )
+                elif memory_percent >= self.warning_threshold:
+                    self.logger.warning(
+                        f"Memory usage high: {memory_percent:.2f}% "
+                        f"(RSS: {memory_info.rss / 1024 / 1024:.2f} MB)"
+                    )
+
+                await asyncio.sleep(self.interval)
+            except Exception as e:
+                self.logger.error(f"Error in memory monitoring: {e}")
+                await asyncio.sleep(self.interval)
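
MemoryMonitorExtension is opt-in: create_instance() raises NotConfigured unless the flag below is set, and the extension requires psutil to be installed. A minimal settings sketch using the key names and defaults from the new file; where these keys live in a generated project is an assumption:

    MEMORY_MONITOR_ENABLED = True      # required; otherwise the extension is not created
    MEMORY_MONITOR_INTERVAL = 60       # seconds between checks
    MEMORY_WARNING_THRESHOLD = 80.0    # memory percentage that triggers a warning log
    MEMORY_CRITICAL_THRESHOLD = 90.0   # memory percentage that triggers a critical log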

crawlo/extension/performance_profiler.py (new file)

@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+import io
+import os
+import pstats
+import asyncio
+import cProfile
+from typing import Any, Optional
+
+from crawlo.utils.log import get_logger
+from crawlo.utils.error_handler import ErrorHandler
+from crawlo.event import spider_opened, spider_closed
+
+
+class PerformanceProfilerExtension:
+    """
+    Performance profiling extension.
+    Profiles the spider while it runs to help optimise crawling performance.
+    """
+
+    def __init__(self, crawler: Any):
+        self.settings = crawler.settings
+        self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+        self.error_handler = ErrorHandler(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+
+        # Configuration parameters
+        self.enabled = self.settings.get_bool('PERFORMANCE_PROFILER_ENABLED', False)
+        self.output_dir = self.settings.get('PERFORMANCE_PROFILER_OUTPUT_DIR', 'profiling')
+        self.interval = self.settings.get_int('PERFORMANCE_PROFILER_INTERVAL', 300)  # default: 5 minutes
+
+        self.profiler: Optional[cProfile.Profile] = None
+        self.task: Optional[asyncio.Task] = None
+
+        # Create the output directory
+        if self.enabled:
+            os.makedirs(self.output_dir, exist_ok=True)
+
+    @classmethod
+    def create_instance(cls, crawler: Any) -> 'PerformanceProfilerExtension':
+        # Only create the instance when enabled in settings
+        if not crawler.settings.get_bool('PERFORMANCE_PROFILER_ENABLED', False):
+            from crawlo.exceptions import NotConfigured
+            raise NotConfigured("PerformanceProfilerExtension: PERFORMANCE_PROFILER_ENABLED is False")
+
+        o = cls(crawler)
+        if o.enabled:
+            crawler.subscriber.subscribe(o.spider_opened, event=spider_opened)
+            crawler.subscriber.subscribe(o.spider_closed, event=spider_closed)
+        return o
+
+    async def spider_opened(self) -> None:
+        """Start profiling when the spider opens."""
+        if not self.enabled:
+            return
+
+        try:
+            self.profiler = cProfile.Profile()
+            self.profiler.enable()
+
+            # Start the task that periodically saves profiling results
+            self.task = asyncio.create_task(self._periodic_save())
+
+            self.logger.info("Performance profiler started.")
+        except Exception as e:
+            self.error_handler.handle_error(
+                e,
+                context="Failed to start performance profiler",
+                raise_error=False
+            )
+
+    async def spider_closed(self) -> None:
+        """Stop profiling and save results when the spider closes."""
+        if not self.enabled or not self.profiler:
+            return
+
+        try:
+            # Stop the periodic-save task
+            if self.task:
+                self.task.cancel()
+                try:
+                    await self.task
+                except asyncio.CancelledError:
+                    pass
+
+            # Stop the profiler and save the final results
+            self.profiler.disable()
+
+            # Save the profiling results
+            await self._save_profile("final")
+            self.logger.info("Performance profiler stopped and results saved.")
+        except Exception as e:
+            self.error_handler.handle_error(
+                e,
+                context="Failed to stop performance profiler",
+                raise_error=False
+            )
+
+    async def _periodic_save(self) -> None:
+        """Periodically save profiling results."""
+        counter = 1
+        while True:
+            try:
+                await asyncio.sleep(self.interval)
+                if self.profiler:
+                    # Temporarily disable the profiler while saving
+                    self.profiler.disable()
+                    await self._save_profile(f"periodic_{counter}")
+                    counter += 1
+                    # Re-enable the profiler
+                    self.profiler.enable()
+            except asyncio.CancelledError:
+                break
+            except Exception as e:
+                self.logger.error(f"Error in periodic profiling save: {e}")
+
+    async def _save_profile(self, name: str) -> None:
+        """Save profiling results to a file."""
+        try:
+            # Build an in-memory string stream
+            s = io.StringIO()
+            ps = pstats.Stats(self.profiler, stream=s)
+
+            # Sort and print the statistics
+            ps.sort_stats('cumulative')
+            ps.print_stats()
+
+            # Write to file
+            filename = os.path.join(self.output_dir, f'profile_{name}.txt')
+            with open(filename, 'w', encoding='utf-8') as f:
+                f.write(s.getvalue())
+
+            self.logger.info(f"Performance profile saved to {filename}")
+        except Exception as e:
+            self.logger.error(f"Error saving performance profile: {e}")
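
PerformanceProfilerExtension wraps the run in cProfile and dumps cumulative-time statistics to text files, both periodically and once on shutdown. A minimal settings sketch using the key names and defaults shown above; placement in the project settings module is an assumption:

    PERFORMANCE_PROFILER_ENABLED = True            # required; otherwise NotConfigured is raised
    PERFORMANCE_PROFILER_OUTPUT_DIR = 'profiling'  # directory that receives profile_*.txt dumps
    PERFORMANCE_PROFILER_INTERVAL = 300            # seconds between periodic dumps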

crawlo/extension/request_recorder.py (new file)

@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+import os
+import json
+from typing import Any
+from datetime import datetime
+
+from crawlo import event
+from crawlo.utils.log import get_logger
+
+
+class RequestRecorderExtension:
+    """
+    Request recording extension.
+    Records every scheduled request to a file to ease debugging and analysis.
+    """
+
+    def __init__(self, crawler: Any):
+        self.settings = crawler.settings
+        self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+
+        # Configuration parameters
+        self.enabled = self.settings.get_bool('REQUEST_RECORDER_ENABLED', False)
+        self.output_dir = self.settings.get('REQUEST_RECORDER_OUTPUT_DIR', 'requests_log')
+        self.max_file_size = self.settings.get_int('REQUEST_RECORDER_MAX_FILE_SIZE', 10 * 1024 * 1024)  # default: 10 MB
+
+        # Create the output directory
+        if self.enabled:
+            os.makedirs(self.output_dir, exist_ok=True)
+
+        self.current_file = None
+        self.current_file_size = 0
+
+    @classmethod
+    def create_instance(cls, crawler: Any) -> 'RequestRecorderExtension':
+        # Only create the instance when enabled in settings
+        if not crawler.settings.get_bool('REQUEST_RECORDER_ENABLED', False):
+            from crawlo.exceptions import NotConfigured
+            raise NotConfigured("RequestRecorderExtension: REQUEST_RECORDER_ENABLED is False")
+
+        o = cls(crawler)
+        if o.enabled:
+            crawler.subscriber.subscribe(o.request_scheduled, event=event.request_scheduled)
+            crawler.subscriber.subscribe(o.response_received, event=event.response_received)
+            crawler.subscriber.subscribe(o.spider_closed, event=event.spider_closed)
+        return o
+
+    async def request_scheduled(self, request: Any, spider: Any) -> None:
+        """Record a scheduled request."""
+        if not self.enabled:
+            return
+
+        try:
+            request_info = {
+                'timestamp': datetime.now().isoformat(),
+                'type': 'request',
+                'url': request.url,
+                'method': request.method,
+                'headers': dict(request.headers),
+                'meta': getattr(request, 'meta', {}),
+            }
+
+            await self._write_record(request_info)
+        except Exception as e:
+            self.logger.error(f"Error recording request: {e}")
+
+    async def response_received(self, response: Any, spider: Any) -> None:
+        """Record a received response."""
+        if not self.enabled:
+            return
+
+        try:
+            response_info = {
+                'timestamp': datetime.now().isoformat(),
+                'type': 'response',
+                'url': response.url,
+                'status_code': response.status_code,
+                'headers': dict(response.headers),
+            }
+
+            await self._write_record(response_info)
+        except Exception as e:
+            self.logger.error(f"Error recording response: {e}")
+
+    async def spider_closed(self, spider: Any) -> None:
+        """Clean up resources when the spider closes."""
+        if self.current_file:
+            self.current_file.close()
+            self.current_file = None
+        self.logger.info("Request recorder closed.")
+
+    async def _write_record(self, record: dict) -> None:
+        """Write a record to file."""
+        # Check whether a new file needs to be opened
+        if not self.current_file or self.current_file_size > self.max_file_size:
+            if self.current_file:
+                self.current_file.close()
+
+            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+            filename = os.path.join(self.output_dir, f'requests_{timestamp}.jsonl')
+            self.current_file = open(filename, 'a', encoding='utf-8')
+            self.current_file_size = 0
+
+        # Write the record
+        line = json.dumps(record, ensure_ascii=False) + '\n'
+        self.current_file.write(line)
+        self.current_file.flush()
+        self.current_file_size += len(line.encode('utf-8'))
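
RequestRecorderExtension appends one JSON object per request or response to rotating .jsonl files. A minimal settings sketch using the key names and defaults shown above; placement in the project settings module is an assumption:

    REQUEST_RECORDER_ENABLED = True                     # required; otherwise NotConfigured is raised
    REQUEST_RECORDER_OUTPUT_DIR = 'requests_log'        # directory for requests_<timestamp>.jsonl files
    REQUEST_RECORDER_MAX_FILE_SIZE = 10 * 1024 * 1024   # start a new file after roughly 10 MB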