crawlo 1.0.1-py3-none-any.whl → 1.0.3-py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (80)
  1. crawlo/__init__.py +9 -6
  2. crawlo/__version__.py +1 -2
  3. crawlo/core/__init__.py +2 -2
  4. crawlo/core/engine.py +158 -158
  5. crawlo/core/processor.py +40 -40
  6. crawlo/core/scheduler.py +57 -59
  7. crawlo/crawler.py +242 -107
  8. crawlo/downloader/__init__.py +78 -78
  9. crawlo/downloader/aiohttp_downloader.py +259 -96
  10. crawlo/downloader/httpx_downloader.py +187 -48
  11. crawlo/downloader/playwright_downloader.py +160 -160
  12. crawlo/event.py +11 -11
  13. crawlo/exceptions.py +64 -64
  14. crawlo/extension/__init__.py +31 -31
  15. crawlo/extension/log_interval.py +49 -49
  16. crawlo/extension/log_stats.py +44 -44
  17. crawlo/filters/__init__.py +37 -37
  18. crawlo/filters/aioredis_filter.py +157 -129
  19. crawlo/filters/memory_filter.py +202 -203
  20. crawlo/filters/redis_filter.py +119 -119
  21. crawlo/items/__init__.py +62 -62
  22. crawlo/items/items.py +118 -118
  23. crawlo/middleware/__init__.py +21 -21
  24. crawlo/middleware/default_header.py +32 -32
  25. crawlo/middleware/download_delay.py +28 -28
  26. crawlo/middleware/middleware_manager.py +140 -140
  27. crawlo/middleware/request_ignore.py +30 -30
  28. crawlo/middleware/response_code.py +18 -18
  29. crawlo/middleware/response_filter.py +26 -26
  30. crawlo/middleware/retry.py +90 -89
  31. crawlo/network/__init__.py +7 -7
  32. crawlo/network/request.py +205 -155
  33. crawlo/network/response.py +166 -93
  34. crawlo/pipelines/__init__.py +13 -13
  35. crawlo/pipelines/console_pipeline.py +39 -39
  36. crawlo/pipelines/mongo_pipeline.py +116 -116
  37. crawlo/pipelines/mysql_batch_pipline.py +133 -133
  38. crawlo/pipelines/mysql_pipeline.py +195 -176
  39. crawlo/pipelines/pipeline_manager.py +56 -56
  40. crawlo/settings/__init__.py +7 -7
  41. crawlo/settings/default_settings.py +93 -89
  42. crawlo/settings/setting_manager.py +99 -99
  43. crawlo/spider/__init__.py +36 -36
  44. crawlo/stats_collector.py +59 -47
  45. crawlo/subscriber.py +106 -27
  46. crawlo/task_manager.py +27 -27
  47. crawlo/templates/item_template.tmpl +21 -21
  48. crawlo/templates/project_template/main.py +32 -32
  49. crawlo/templates/project_template/setting.py +189 -189
  50. crawlo/templates/spider_template.tmpl +30 -30
  51. crawlo/utils/__init__.py +7 -7
  52. crawlo/utils/concurrency_manager.py +125 -0
  53. crawlo/utils/date_tools.py +177 -177
  54. crawlo/utils/func_tools.py +82 -82
  55. crawlo/utils/log.py +39 -39
  56. crawlo/utils/pqueue.py +173 -173
  57. crawlo/utils/project.py +59 -59
  58. crawlo/utils/request.py +122 -85
  59. crawlo/utils/system.py +11 -11
  60. crawlo/utils/tools.py +303 -0
  61. crawlo/utils/url.py +39 -39
  62. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/METADATA +48 -36
  63. crawlo-1.0.3.dist-info/RECORD +80 -0
  64. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/top_level.txt +1 -0
  65. tests/__init__.py +7 -0
  66. tests/baidu_spider/__init__.py +7 -0
  67. tests/baidu_spider/demo.py +94 -0
  68. tests/baidu_spider/items.py +25 -0
  69. tests/baidu_spider/middleware.py +49 -0
  70. tests/baidu_spider/pipeline.py +55 -0
  71. tests/baidu_spider/request_fingerprints.txt +9 -0
  72. tests/baidu_spider/run.py +27 -0
  73. tests/baidu_spider/settings.py +78 -0
  74. tests/baidu_spider/spiders/__init__.py +7 -0
  75. tests/baidu_spider/spiders/bai_du.py +61 -0
  76. tests/baidu_spider/spiders/sina.py +79 -0
  77. crawlo-1.0.1.dist-info/RECORD +0 -67
  78. crawlo-1.0.1.dist-info/licenses/LICENSE +0 -23
  79. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/WHEEL +0 -0
  80. {crawlo-1.0.1.dist-info → crawlo-1.0.3.dist-info}/entry_points.txt +0 -0
crawlo/extension/log_stats.py
@@ -1,44 +1,44 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-from crawlo import event
-from crawlo.utils.date_tools import get_current_time, time_diff_seconds
-
-
-class LogStats(object):
-
-    def __init__(self, stats):
-        self._stats = stats
-
-    @classmethod
-    def create_instance(cls, crawler):
-        o = cls(crawler.stats)
-        crawler.subscriber.subscribe(o.spider_opened, event=event.spider_opened)
-        crawler.subscriber.subscribe(o.spider_closed, event=event.spider_closed)
-        crawler.subscriber.subscribe(o.item_successful, event=event.item_successful)
-        crawler.subscriber.subscribe(o.item_discard, event=event.item_discard)
-        crawler.subscriber.subscribe(o.response_received, event=event.response_received)
-        crawler.subscriber.subscribe(o.request_scheduled, event=event.request_scheduled)
-
-        return o
-
-    async def spider_opened(self):
-        self._stats['start_time'] = get_current_time(fmt='%Y-%m-%d %H:%M:%S')
-
-    async def spider_closed(self):
-        self._stats['end_time'] = get_current_time(fmt='%Y-%m-%d %H:%M:%S')
-        self._stats['cost_time(s)'] = time_diff_seconds(start_time=self._stats['start_time'], end_time=self._stats['end_time'])
-
-    async def item_successful(self, _item, _spider):
-        self._stats.inc_value('item_successful_count')
-
-    async def item_discard(self, _item, exc, _spider):
-        self._stats.inc_value('item_discard_count')
-        reason = exc.msg
-        if reason:
-            self._stats.inc_value(f"item_discard/{reason}")
-
-    async def response_received(self, _response, _spider):
-        self._stats.inc_value('response_received_count')
-
-    async def request_scheduled(self, _request, _spider):
-        self._stats.inc_value('request_scheduler_count')
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+from crawlo import event
+from crawlo.utils.date_tools import get_current_time, time_diff_seconds
+
+
+class LogStats(object):
+
+    def __init__(self, stats):
+        self._stats = stats
+
+    @classmethod
+    def create_instance(cls, crawler):
+        o = cls(crawler.stats)
+        crawler.subscriber.subscribe(o.spider_opened, event=event.spider_opened)
+        crawler.subscriber.subscribe(o.spider_closed, event=event.spider_closed)
+        crawler.subscriber.subscribe(o.item_successful, event=event.item_successful)
+        crawler.subscriber.subscribe(o.item_discard, event=event.item_discard)
+        crawler.subscriber.subscribe(o.response_received, event=event.response_received)
+        crawler.subscriber.subscribe(o.request_scheduled, event=event.request_scheduled)
+
+        return o
+
+    async def spider_opened(self):
+        self._stats['start_time'] = get_current_time(fmt='%Y-%m-%d %H:%M:%S')
+
+    async def spider_closed(self):
+        self._stats['end_time'] = get_current_time(fmt='%Y-%m-%d %H:%M:%S')
+        self._stats['cost_time(s)'] = time_diff_seconds(start_time=self._stats['start_time'], end_time=self._stats['end_time'])
+
+    async def item_successful(self, _item, _spider):
+        self._stats.inc_value('item_successful_count')
+
+    async def item_discard(self, _item, exc, _spider):
+        self._stats.inc_value('item_discard_count')
+        reason = exc.msg
+        if reason:
+            self._stats.inc_value(f"item_discard/{reason}")
+
+    async def response_received(self, _response, _spider):
+        self._stats.inc_value('response_received_count')
+
+    async def request_scheduled(self, _request, _spider):
+        self._stats.inc_value('request_scheduler_count')
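Note: the removed and re-added lines in this hunk are textually identical, so the change to log_stats.py appears to be limited to whitespace or line endings. For context, LogStats registers one async handler per crawler event via crawler.subscriber.subscribe(...). The sketch below shows that subscribe/notify pattern in isolation; TinySubscriber and its notify() signature are hypothetical stand-ins, not crawlo's actual Subscriber API (crawlo/subscriber.py, which also changed in this release).

# Hypothetical sketch of the subscribe/notify pattern LogStats relies on.
import asyncio
from collections import defaultdict


class TinySubscriber:
    """Stand-in event bus: maps an event name to a list of async callbacks."""

    def __init__(self):
        self._handlers = defaultdict(list)

    def subscribe(self, callback, *, event):
        self._handlers[event].append(callback)

    async def notify(self, event, *args):
        # LogStats handlers are coroutines, so each one is awaited
        for callback in self._handlers[event]:
            await callback(*args)


async def main():
    stats = {}
    subscriber = TinySubscriber()

    async def response_received(_response, _spider):
        stats['response_received_count'] = stats.get('response_received_count', 0) + 1

    subscriber.subscribe(response_received, event='response_received')
    await subscriber.notify('response_received', object(), object())
    print(stats)  # {'response_received_count': 1}


if __name__ == '__main__':
    asyncio.run(main())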
crawlo/filters/__init__.py
@@ -1,37 +1,37 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-from abc import ABC, abstractmethod
-
-from crawlo import Request
-from crawlo.utils.request import request_fingerprint
-
-
-class BaseFilter(ABC):
-
-    def __init__(self, logger, stats, debug: bool):
-        self.logger = logger
-        self.stats = stats
-        self.debug = debug
-
-    @classmethod
-    def create_instance(cls, *args, **kwargs) -> 'BaseFilter':
-        return cls(*args, **kwargs)
-
-    def requested(self, request: Request):
-        fp = request_fingerprint(request)
-        if fp in self:
-            return True
-        self.add_fingerprint(fp)
-        return False
-
-    @abstractmethod
-    def add_fingerprint(self, fp) -> None:
-        pass
-
-    def log_stats(self, request: Request) -> None:
-        if self.debug:
-            self.logger.debug(f'Filtered duplicate request: {request}')
-        self.stats.inc_value(f'{self}/filtered_count')
-
-    def __str__(self) -> str:
-        return f'{self.__class__.__name__}'
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+from abc import ABC, abstractmethod
+
+from crawlo import Request
+from crawlo.utils.request import request_fingerprint
+
+
+class BaseFilter(ABC):
+
+    def __init__(self, logger, stats, debug: bool):
+        self.logger = logger
+        self.stats = stats
+        self.debug = debug
+
+    @classmethod
+    def create_instance(cls, *args, **kwargs) -> 'BaseFilter':
+        return cls(*args, **kwargs)
+
+    def requested(self, request: Request):
+        fp = request_fingerprint(request)
+        if fp in self:
+            return True
+        self.add_fingerprint(fp)
+        return False
+
+    @abstractmethod
+    def add_fingerprint(self, fp) -> None:
+        pass
+
+    def log_stats(self, request: Request) -> None:
+        if self.debug:
+            self.logger.debug(f'Filtered duplicate request: {request}')
+        self.stats.inc_value(f'{self}/filtered_count')
+
+    def __str__(self) -> str:
+        return f'{self.__class__.__name__}'
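As with the previous hunk, the content of crawlo/filters/__init__.py is unchanged apart from the rewrite itself. BaseFilter defines the dedup contract: requested() computes a fingerprint, tests membership with `fp in self`, and stores new fingerprints through the abstract add_fingerprint(). A minimal in-memory subclass could look like the sketch below; this is a hypothetical example, and crawlo's bundled memory_filter.py may be implemented differently.

# Hypothetical in-memory subclass of the BaseFilter contract shown above.
from crawlo.filters import BaseFilter


class SetFilter(BaseFilter):
    """Deduplicates requests with a plain Python set (single-process only)."""

    def __init__(self, logger, stats, debug: bool = False):
        super().__init__(logger, stats, debug)
        self._fingerprints = set()

    def __contains__(self, fp) -> bool:
        # BaseFilter.requested() relies on `fp in self`, so a subclass
        # needs __contains__ in addition to add_fingerprint()
        return fp in self._fingerprints

    def add_fingerprint(self, fp) -> None:
        self._fingerprints.add(fp)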
crawlo/filters/aioredis_filter.py
@@ -1,130 +1,158 @@
-#!/usr/bin/python
-# -*- coding:UTF-8 -*-
-from typing import Optional
-
-import aioredis
-
-from crawlo import Request
-from crawlo.filters import BaseFilter
-from crawlo.utils.log import get_logger
-from crawlo.utils.request import request_fingerprint
-
-
-class AioRedisFilter(BaseFilter):
-    """Async request de-duplication filter backed by a Redis set (for distributed crawlers)"""
-
-    def __init__(
-            self,
-            redis_key: str,
-            client: aioredis.Redis,
-            stats: dict,
-            debug: bool,
-            log_level: str,
-            cleanup_fp: bool = False
-    ):
-        """
-        Initialize the filter
-
-        Parameters:
-        redis_key: name of the Redis key that stores the fingerprints
-        client: aioredis client instance
-        stats: statistics dict
-        debug: whether debug mode is enabled
-        log_level: logging level
-        save_fp: whether to keep fingerprint data when the spider closes
-        """
-        # Initialize the logger (class name used as the log tag)
-        self.logger = get_logger(self.__class__.__name__, log_level)
-        super().__init__(self.logger, stats, debug)
-
-        self.redis_key = redis_key  # Redis key (e.g. "project:request_fingerprints")
-        self.redis = client  # async Redis client
-        self.cleanup_fp = cleanup_fp  # whether to persist fingerprint data
-
-    @classmethod
-    def create_instance(cls, crawler) -> 'BaseFilter':
-        """Create a filter instance from crawler settings (factory method)"""
-        # Read Redis connection settings (with defaults)
-        redis_url = crawler.settings.get('REDIS_URL', 'redis://localhost:6379')
-        decode_responses = crawler.settings.get_bool('DECODE_RESPONSES', True)
-
-        try:
-            # Create the Redis connection pool (max 20 connections)
-            redis_client = aioredis.from_url(
-                redis_url,
-                decode_responses=decode_responses,
-                max_connections=20
-            )
-        except Exception as e:
-            raise RuntimeError(f"Redis连接失败 {redis_url}: {str(e)}")
-
-        # Combine the project name and configured key into the Redis key
-        return cls(
-            redis_key=f"{crawler.settings.get('PROJECT_NAME')}:{crawler.settings.get('REDIS_KEY', 'request_fingerprints')}",
-            client=redis_client,
-            stats=crawler.stats,
-            cleanup_fp=crawler.settings.get_bool('CLEANUP_FP', False),
-            debug=crawler.settings.get_bool('FILTER_DEBUG', False),
-            log_level=crawler.settings.get('LOG_LEVEL', 'INFO')
-        )
-
-    async def requested(self, request: Request) -> bool:
-        """
-        Check whether a request is a duplicate
-
-        Args:
-            request: the request object to check
-
-        Returns:
-            bool: True for a duplicate request, False for a new one
-        """
-        fp = request_fingerprint(request)  # generate the request fingerprint
-        try:
-            # Check whether the fingerprint is already in the set
-            is_duplicate = await self.redis.sismember(self.redis_key, fp)
-            if is_duplicate:
-                # self.logger.debug(f"Duplicate request found: {fp}")
-                return True
-
-            # New request: add its fingerprint
-            await self.add_fingerprint(fp)
-            return False
-        except aioredis.RedisError as e:
-            self.logger.error(f"Redis操作失败: {str(e)}")
-            raise  # re-raise to the caller
-
-    async def add_fingerprint(self, fp: str) -> None:
-        """Add a new fingerprint to the Redis set"""
-        try:
-            await self.redis.sadd(self.redis_key, fp)
-            self.logger.debug(f"新增指纹: {fp}")
-        except aioredis.RedisError as e:
-            self.logger.error(f"指纹添加失败: {str(e)}")
-            raise
-
-    async def closed(self, reason: Optional[str] = None) -> None:
-        """
-        Handle spider shutdown (mirrors Scrapy's close logic)
-
-        Args:
-            reason: why the spider closed (standard Scrapy argument)
-        """
-        if self.cleanup_fp:  # only clean up when explicitly configured
-            try:
-                deleted = await self.redis.delete(self.redis_key)
-                self.logger.info(
-                    f"Cleaned {deleted} fingerprints from {self.redis_key} "
-                    f"(reason: {reason or 'manual'})"
-                )
-            except aioredis.RedisError as e:
-                self.logger.warning(f"Cleanup failed: {e}")
-            finally:
-                await self._close_redis()
-
-    async def _close_redis(self) -> None:
-        """Safely close the Redis connection"""
-        try:
-            await self.redis.close()
-            await self.redis.connection_pool.disconnect()
-        except Exception as e:
+#!/usr/bin/python
+# -*- coding:UTF-8 -*-
+from typing import Optional
+import aioredis
+from crawlo import Request
+from crawlo.filters import BaseFilter
+from crawlo.utils.log import get_logger
+from crawlo.utils.request import request_fingerprint
+
+
+class AioRedisFilter(BaseFilter):
+    """Async request de-duplication filter backed by a Redis set (for distributed crawlers)"""
+
+    def __init__(
+            self,
+            redis_key: str,
+            client: aioredis.Redis,
+            stats: dict,
+            debug: bool,
+            log_level: str,
+            cleanup_fp: bool = False
+    ):
+        """Initialize the filter"""
+        self.logger = get_logger(self.__class__.__name__, log_level)
+        super().__init__(self.logger, stats, debug)
+
+        self.redis_key = redis_key
+        self.redis = client
+        self.cleanup_fp = cleanup_fp
+
+    @classmethod
+    def create_instance(cls, crawler) -> 'BaseFilter':
+        """Create a filter instance from crawler settings"""
+        redis_url = crawler.settings.get('REDIS_URL', 'redis://localhost:6379')
+        decode_responses = crawler.settings.get_bool('DECODE_RESPONSES', False)  # key change: default is now False
+
+        try:
+            redis_client = aioredis.from_url(
+                redis_url,
+                decode_responses=decode_responses,
+                max_connections=20,
+                encoding='utf-8'
+            )
+        except Exception as e:
+            raise RuntimeError(f"Redis连接失败 {redis_url}: {str(e)}")
+
+        return cls(
+            redis_key=f"{crawler.settings.get('PROJECT_NAME', 'default')}:{crawler.settings.get('REDIS_KEY', 'request_fingerprints')}",
+            client=redis_client,
+            stats=crawler.stats,
+            cleanup_fp=crawler.settings.get_bool('CLEANUP_FP', False),
+            debug=crawler.settings.get_bool('FILTER_DEBUG', False),
+            log_level=crawler.settings.get('LOG_LEVEL', 'INFO')
+        )
+
+    async def requested(self, request: Request) -> bool:
+        """
+        Check whether a request is a duplicate
+        """
+        try:
+            fp = request_fingerprint(request)
+            self.logger.debug(f"Checking fingerprint: {fp}")
+
+            # Make sure fp is a string
+            if not isinstance(fp, str):
+                fp = str(fp)
+
+            # Check the Redis connection state
+            if not self.redis:
+                raise RuntimeError("Redis client is not initialized")
+
+            # Check whether the fingerprint already exists
+            is_member = await self.redis.sismember(self.redis_key, fp)
+            self.logger.debug(f"Fingerprint {fp} exists: {is_member}")
+
+            if is_member:
+                if self.debug:
+                    self.logger.debug(f"Filtered duplicate request: {fp}")
+                return True
+
+            # Add the new fingerprint
+            result = await self.redis.sadd(self.redis_key, fp)
+
+            if self.debug:
+                if result == 1:
+                    self.logger.debug(f"Added new fingerprint: {fp}")
+                else:
+                    self.logger.warning(f"Failed to add fingerprint: {fp}")
+
+            return False
+
+        except Exception as e:
+            self.logger.error(f"Filter check failed for {getattr(request, 'url', 'unknown')}: {str(e)}")
+            # Could either re-raise or return False (i.e. do not filter)
+            raise
+
+    async def add_fingerprint(self, fp: str) -> bool:
+        """Add a new fingerprint to the Redis set"""
+        try:
+            if not isinstance(fp, str):
+                fp = str(fp)
+
+            result = await self.redis.sadd(self.redis_key, fp)
+            if self.debug:
+                self.logger.debug(f"Added fingerprint {fp}, result: {result}")
+            return result == 1
+        except Exception as e:
+            self.logger.error(f"Failed to add fingerprint {fp}: {str(e)}")
+            raise
+
+    async def get_stats(self) -> dict:
+        """Return the filter's current statistics"""
+        try:
+            count = await self.redis.scard(self.redis_key)
+            return {
+                'total_fingerprints': count,
+                'redis_key': self.redis_key,
+                **self.stats
+            }
+        except Exception as e:
+            self.logger.error(f"Failed to get stats: {str(e)}")
+            return self.stats
+
+    async def clear_all(self) -> int:
+        """Remove all stored fingerprints"""
+        try:
+            deleted = await self.redis.delete(self.redis_key)
+            self.logger.info(f"Cleared {deleted} keys")
+            return deleted
+        except Exception as e:
+            self.logger.error(f"Failed to clear fingerprints: {str(e)}")
+            raise
+
+    async def closed(self, reason: Optional[str] = None) -> None:
+        """Handle spider shutdown"""
+        try:
+            if self.cleanup_fp:
+                deleted = await self.redis.delete(self.redis_key)
+                self.logger.info(
+                    f"Cleaned {deleted} fingerprints from {self.redis_key} "
+                    f"(reason: {reason or 'manual'})"
+                )
+            else:
+                # Report statistics
+                count = await self.redis.scard(self.redis_key)
+                self.logger.info(f"Total fingerprints preserved: {count}")
+        except Exception as e:
+            self.logger.warning(f"Close operation failed: {e}")
+        finally:
+            await self._close_redis()
+
+    async def _close_redis(self) -> None:
+        """Safely close the Redis connection"""
+        try:
+            if hasattr(self.redis, 'close'):
+                await self.redis.close()
+        except Exception as e:
             self.logger.warning(f"Redis close error: {e}")
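The 1.0.3 AioRedisFilter changes behaviour in several visible ways: requested() now calls sadd directly instead of going through add_fingerprint(), adds type checks and debug logging, and catches Exception rather than only aioredis.RedisError; the DECODE_RESPONSES default flips from True to False; PROJECT_NAME gains a 'default' fallback; and new get_stats()/clear_all() helpers are added. The settings it reads are sketched below with the defaults visible in the diff; placing them in a project-level setting.py (and the PROJECT_NAME value) is an assumption, not something this diff shows.

# Settings consumed by AioRedisFilter.create_instance() in 1.0.3, with the
# defaults visible in the diff above. The file location and PROJECT_NAME value
# are hypothetical.
PROJECT_NAME = 'my_project'            # falls back to 'default' in 1.0.3
REDIS_URL = 'redis://localhost:6379'   # Redis connection string
REDIS_KEY = 'request_fingerprints'     # stored as f"{PROJECT_NAME}:{REDIS_KEY}"
DECODE_RESPONSES = False               # default changed from True (1.0.1) to False (1.0.3)
CLEANUP_FP = False                     # True deletes the fingerprint set when the spider closes
FILTER_DEBUG = False                   # verbose per-fingerprint logging
LOG_LEVEL = 'INFO'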