crawlo 1.0.2-py3-none-any.whl → 1.0.4-py3-none-any.whl

This diff shows the content of publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release.


This version of crawlo might be problematic.

Files changed (79)
  1. crawlo/__init__.py +9 -6
  2. crawlo/__version__.py +1 -2
  3. crawlo/core/__init__.py +2 -2
  4. crawlo/core/engine.py +158 -158
  5. crawlo/core/processor.py +40 -40
  6. crawlo/core/scheduler.py +57 -59
  7. crawlo/crawler.py +242 -222
  8. crawlo/downloader/__init__.py +78 -78
  9. crawlo/downloader/aiohttp_downloader.py +259 -96
  10. crawlo/downloader/httpx_downloader.py +187 -48
  11. crawlo/downloader/playwright_downloader.py +160 -160
  12. crawlo/event.py +11 -11
  13. crawlo/exceptions.py +64 -64
  14. crawlo/extension/__init__.py +31 -31
  15. crawlo/extension/log_interval.py +49 -49
  16. crawlo/extension/log_stats.py +44 -44
  17. crawlo/filters/__init__.py +37 -37
  18. crawlo/filters/aioredis_filter.py +150 -130
  19. crawlo/filters/memory_filter.py +202 -203
  20. crawlo/items/__init__.py +62 -62
  21. crawlo/items/items.py +118 -118
  22. crawlo/middleware/__init__.py +21 -21
  23. crawlo/middleware/default_header.py +32 -32
  24. crawlo/middleware/download_delay.py +28 -28
  25. crawlo/middleware/middleware_manager.py +140 -140
  26. crawlo/middleware/request_ignore.py +30 -30
  27. crawlo/middleware/response_code.py +18 -18
  28. crawlo/middleware/response_filter.py +26 -26
  29. crawlo/middleware/retry.py +90 -90
  30. crawlo/network/__init__.py +7 -7
  31. crawlo/network/request.py +204 -233
  32. crawlo/network/response.py +166 -162
  33. crawlo/pipelines/__init__.py +13 -13
  34. crawlo/pipelines/console_pipeline.py +39 -39
  35. crawlo/pipelines/mongo_pipeline.py +116 -116
  36. crawlo/pipelines/mysql_batch_pipline.py +133 -133
  37. crawlo/pipelines/mysql_pipeline.py +195 -195
  38. crawlo/pipelines/pipeline_manager.py +56 -56
  39. crawlo/settings/__init__.py +7 -7
  40. crawlo/settings/default_settings.py +94 -89
  41. crawlo/settings/setting_manager.py +99 -99
  42. crawlo/spider/__init__.py +36 -36
  43. crawlo/stats_collector.py +59 -47
  44. crawlo/subscriber.py +106 -106
  45. crawlo/task_manager.py +27 -27
  46. crawlo/templates/item_template.tmpl +21 -21
  47. crawlo/templates/project_template/main.py +32 -32
  48. crawlo/templates/project_template/setting.py +189 -189
  49. crawlo/templates/spider_template.tmpl +30 -30
  50. crawlo/utils/__init__.py +7 -7
  51. crawlo/utils/concurrency_manager.py +124 -124
  52. crawlo/utils/date_tools.py +177 -177
  53. crawlo/utils/func_tools.py +82 -82
  54. crawlo/utils/log.py +39 -39
  55. crawlo/utils/pqueue.py +173 -173
  56. crawlo/utils/project.py +59 -59
  57. crawlo/utils/request.py +122 -85
  58. crawlo/utils/system.py +11 -11
  59. crawlo/utils/tools.py +302 -302
  60. crawlo/utils/url.py +39 -39
  61. {crawlo-1.0.2.dist-info → crawlo-1.0.4.dist-info}/METADATA +48 -48
  62. crawlo-1.0.4.dist-info/RECORD +79 -0
  63. {crawlo-1.0.2.dist-info → crawlo-1.0.4.dist-info}/top_level.txt +1 -0
  64. tests/__init__.py +7 -0
  65. tests/baidu_spider/__init__.py +7 -0
  66. tests/baidu_spider/demo.py +94 -0
  67. tests/baidu_spider/items.py +25 -0
  68. tests/baidu_spider/middleware.py +49 -0
  69. tests/baidu_spider/pipeline.py +55 -0
  70. tests/baidu_spider/request_fingerprints.txt +9 -0
  71. tests/baidu_spider/run.py +27 -0
  72. tests/baidu_spider/settings.py +80 -0
  73. tests/baidu_spider/spiders/__init__.py +7 -0
  74. tests/baidu_spider/spiders/bai_du.py +61 -0
  75. tests/baidu_spider/spiders/sina.py +79 -0
  76. crawlo/filters/redis_filter.py +0 -120
  77. crawlo-1.0.2.dist-info/RECORD +0 -68
  78. {crawlo-1.0.2.dist-info → crawlo-1.0.4.dist-info}/WHEEL +0 -0
  79. {crawlo-1.0.2.dist-info → crawlo-1.0.4.dist-info}/entry_points.txt +0 -0
crawlo/extension/log_stats.py
@@ -1,44 +1,44 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from crawlo import event
- from crawlo.utils.date_tools import get_current_time, time_diff_seconds
-
-
- class LogStats(object):
-
-     def __init__(self, stats):
-         self._stats = stats
-
-     @classmethod
-     def create_instance(cls, crawler):
-         o = cls(crawler.stats)
-         crawler.subscriber.subscribe(o.spider_opened, event=event.spider_opened)
-         crawler.subscriber.subscribe(o.spider_closed, event=event.spider_closed)
-         crawler.subscriber.subscribe(o.item_successful, event=event.item_successful)
-         crawler.subscriber.subscribe(o.item_discard, event=event.item_discard)
-         crawler.subscriber.subscribe(o.response_received, event=event.response_received)
-         crawler.subscriber.subscribe(o.request_scheduled, event=event.request_scheduled)
-
-         return o
-
-     async def spider_opened(self):
-         self._stats['start_time'] = get_current_time(fmt='%Y-%m-%d %H:%M:%S')
-
-     async def spider_closed(self):
-         self._stats['end_time'] = get_current_time(fmt='%Y-%m-%d %H:%M:%S')
-         self._stats['cost_time(s)'] = time_diff_seconds(start_time=self._stats['start_time'], end_time=self._stats['end_time'])
-
-     async def item_successful(self, _item, _spider):
-         self._stats.inc_value('item_successful_count')
-
-     async def item_discard(self, _item, exc, _spider):
-         self._stats.inc_value('item_discard_count')
-         reason = exc.msg
-         if reason:
-             self._stats.inc_value(f"item_discard/{reason}")
-
-     async def response_received(self, _response, _spider):
-         self._stats.inc_value('response_received_count')
-
-     async def request_scheduled(self, _request, _spider):
-         self._stats.inc_value('request_scheduler_count')
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from crawlo import event
+ from crawlo.utils.date_tools import get_current_time, time_diff_seconds
+
+
+ class LogStats(object):
+
+     def __init__(self, stats):
+         self._stats = stats
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(crawler.stats)
+         crawler.subscriber.subscribe(o.spider_opened, event=event.spider_opened)
+         crawler.subscriber.subscribe(o.spider_closed, event=event.spider_closed)
+         crawler.subscriber.subscribe(o.item_successful, event=event.item_successful)
+         crawler.subscriber.subscribe(o.item_discard, event=event.item_discard)
+         crawler.subscriber.subscribe(o.response_received, event=event.response_received)
+         crawler.subscriber.subscribe(o.request_scheduled, event=event.request_scheduled)
+
+         return o
+
+     async def spider_opened(self):
+         self._stats['start_time'] = get_current_time(fmt='%Y-%m-%d %H:%M:%S')
+
+     async def spider_closed(self):
+         self._stats['end_time'] = get_current_time(fmt='%Y-%m-%d %H:%M:%S')
+         self._stats['cost_time(s)'] = time_diff_seconds(start_time=self._stats['start_time'], end_time=self._stats['end_time'])
+
+     async def item_successful(self, _item, _spider):
+         self._stats.inc_value('item_successful_count')
+
+     async def item_discard(self, _item, exc, _spider):
+         self._stats.inc_value('item_discard_count')
+         reason = exc.msg
+         if reason:
+             self._stats.inc_value(f"item_discard/{reason}")
+
+     async def response_received(self, _response, _spider):
+         self._stats.inc_value('response_received_count')
+
+     async def request_scheduled(self, _request, _spider):
+         self._stats.inc_value('request_scheduler_count')
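As context for the hunk above: LogStats only works because crawler.subscriber fans events out to async handlers and crawler.stats supports both item assignment and inc_value(). The stand-in classes below (TinySubscriber and TinyStats are hypothetical names, not crawlo's real Subscriber or stats collector) sketch that subscribe/notify pattern under those assumptions:

import asyncio
from collections import defaultdict


class TinySubscriber:
    """Stand-in for crawlo's subscriber: maps an event key to async callbacks."""

    def __init__(self):
        self._handlers = defaultdict(list)

    def subscribe(self, handler, *, event):
        self._handlers[event].append(handler)

    async def notify(self, event, *args):
        for handler in self._handlers[event]:
            await handler(*args)


class TinyStats(dict):
    """Stand-in for crawlo's stats collector: dict access plus inc_value()."""

    def inc_value(self, key, count=1):
        self[key] = self.get(key, 0) + count


async def main():
    subscriber, stats = TinySubscriber(), TinyStats()

    async def on_response(_response, _spider):
        stats.inc_value('response_received_count')

    # Same wiring shape as LogStats.create_instance: subscribe(handler, event=...)
    subscriber.subscribe(on_response, event='response_received')
    await subscriber.notify('response_received', object(), object())
    print(stats)  # {'response_received_count': 1}


asyncio.run(main())

Running this prints {'response_received_count': 1}, mirroring how the real handlers bump counters as events fire.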
crawlo/filters/__init__.py
@@ -1,37 +1,37 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from abc import ABC, abstractmethod
-
- from crawlo import Request
- from crawlo.utils.request import request_fingerprint
-
-
- class BaseFilter(ABC):
-
-     def __init__(self, logger, stats, debug: bool):
-         self.logger = logger
-         self.stats = stats
-         self.debug = debug
-
-     @classmethod
-     def create_instance(cls, *args, **kwargs) -> 'BaseFilter':
-         return cls(*args, **kwargs)
-
-     def requested(self, request: Request):
-         fp = request_fingerprint(request)
-         if fp in self:
-             return True
-         self.add_fingerprint(fp)
-         return False
-
-     @abstractmethod
-     def add_fingerprint(self, fp) -> None:
-         pass
-
-     def log_stats(self, request: Request) -> None:
-         if self.debug:
-             self.logger.debug(f'Filtered duplicate request: {request}')
-         self.stats.inc_value(f'{self}/filtered_count')
-
-     def __str__(self) -> str:
-         return f'{self.__class__.__name__}'
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from abc import ABC, abstractmethod
+
+ from crawlo import Request
+ from crawlo.utils.request import request_fingerprint
+
+
+ class BaseFilter(ABC):
+
+     def __init__(self, logger, stats, debug: bool):
+         self.logger = logger
+         self.stats = stats
+         self.debug = debug
+
+     @classmethod
+     def create_instance(cls, *args, **kwargs) -> 'BaseFilter':
+         return cls(*args, **kwargs)
+
+     def requested(self, request: Request):
+         fp = request_fingerprint(request)
+         if fp in self:
+             return True
+         self.add_fingerprint(fp)
+         return False
+
+     @abstractmethod
+     def add_fingerprint(self, fp) -> None:
+         pass
+
+     def log_stats(self, request: Request) -> None:
+         if self.debug:
+             self.logger.debug(f'Filtered duplicate request: {request}')
+         self.stats.inc_value(f'{self}/filtered_count')
+
+     def __str__(self) -> str:
+         return f'{self.__class__.__name__}'
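A note on the contract this base class imposes: requested() evaluates `fp in self` and then calls add_fingerprint(), so a concrete filter must provide __contains__ alongside the abstract add_fingerprint(). A minimal in-memory sketch (SetFilter is a hypothetical example, not one of crawlo's shipped filters, and assumes crawlo is importable):

from crawlo.filters import BaseFilter


class SetFilter(BaseFilter):
    """Hypothetical in-memory subclass for illustration (not crawlo's MemoryFilter)."""

    def __init__(self, logger, stats, debug: bool):
        super().__init__(logger, stats, debug)
        self._seen = set()          # fingerprints seen so far

    def add_fingerprint(self, fp) -> None:
        self._seen.add(fp)          # called by BaseFilter.requested() for new requests

    def __contains__(self, fp) -> bool:
        return fp in self._seen     # backs the `fp in self` check in requested()

With this in place, requested() returns False the first time a given fingerprint is seen and True on every repeat.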
crawlo/filters/aioredis_filter.py
@@ -1,130 +1,150 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from typing import Optional
-
- import aioredis
-
- from crawlo import Request
- from crawlo.filters import BaseFilter
- from crawlo.utils.log import get_logger
- from crawlo.utils.request import request_fingerprint
-
-
- class AioRedisFilter(BaseFilter):
-     """Asynchronous request de-duplication filter backed by a Redis set (for distributed crawlers)."""
-
-     def __init__(
-         self,
-         redis_key: str,
-         client: aioredis.Redis,
-         stats: dict,
-         debug: bool,
-         log_level: str,
-         cleanup_fp: bool = False
-     ):
-         """
-         Initialise the filter.
-
-         Parameters:
-             redis_key: Redis key under which fingerprints are stored
-             client: aioredis client instance
-             stats: statistics dictionary
-             debug: whether debug mode is enabled
-             log_level: log level
-             save_fp: whether to keep fingerprint data when the spider closes
-         """
-         # Initialise the logger (class name used as the log identifier)
-         self.logger = get_logger(self.__class__.__name__, log_level)
-         super().__init__(self.logger, stats, debug)
-
-         self.redis_key = redis_key  # Redis storage key (e.g. "project:request_fingerprints")
-         self.redis = client  # async Redis client
-         self.cleanup_fp = cleanup_fp  # whether to persist fingerprint data
-
-     @classmethod
-     def create_instance(cls, crawler) -> 'BaseFilter':
-         """Create a filter instance from crawler settings (factory method)."""
-         # Read Redis connection parameters from settings (with defaults)
-         redis_url = crawler.settings.get('REDIS_URL', 'redis://localhost:6379')
-         decode_responses = crawler.settings.get_bool('DECODE_RESPONSES', True)
-
-         try:
-             # Create the Redis connection pool (capped at 20 connections)
-             redis_client = aioredis.from_url(
-                 redis_url,
-                 decode_responses=decode_responses,
-                 max_connections=20
-             )
-         except Exception as e:
-             raise RuntimeError(f"Failed to connect to Redis {redis_url}: {str(e)}")
-
-         # Combine the project name and the configured key into the Redis key
-         return cls(
-             redis_key=f"{crawler.settings.get('PROJECT_NAME')}:{crawler.settings.get('REDIS_KEY', 'request_fingerprints')}",
-             client=redis_client,
-             stats=crawler.stats,
-             cleanup_fp=crawler.settings.get_bool('CLEANUP_FP', False),
-             debug=crawler.settings.get_bool('FILTER_DEBUG', False),
-             log_level=crawler.settings.get('LOG_LEVEL', 'INFO')
-         )
-
-     async def requested(self, request: Request) -> bool:
-         """
-         Check whether the request is a duplicate.
-
-         Args:
-             request: the request object to check
-
-         Returns:
-             bool: True for a duplicate request, False for a new one
-         """
-         fp = request_fingerprint(request)  # generate the request fingerprint
-         try:
-             # Check whether the fingerprint is already in the set
-             is_duplicate = await self.redis.sismember(self.redis_key, fp)
-             if is_duplicate:
-                 # self.logger.debug(f"Duplicate request found: {fp}")
-                 return True
-
-             # New request: add its fingerprint
-             await self.add_fingerprint(fp)
-             return False
-         except aioredis.RedisError as e:
-             self.logger.error(f"Redis operation failed: {str(e)}")
-             raise  # re-raise to the caller
-
-     async def add_fingerprint(self, fp: str) -> None:
-         """Add a new fingerprint to the Redis set."""
-         try:
-             await self.redis.sadd(self.redis_key, fp)
-             self.logger.debug(f"Added fingerprint: {fp}")
-         except aioredis.RedisError as e:
-             self.logger.error(f"Failed to add fingerprint: {str(e)}")
-             raise
-
-     async def closed(self, reason: Optional[str] = None) -> None:
-         """
-         Handling when the spider closes (compatible with Scrapy's close logic).
-
-         Args:
-             reason: why the spider closed (standard Scrapy argument)
-         """
-         if self.cleanup_fp:  # only clean up when explicitly configured
-             try:
-                 deleted = await self.redis.delete(self.redis_key)
-                 self.logger.info(
-                     f"Cleaned {deleted} fingerprints from {self.redis_key} "
-                     f"(reason: {reason or 'manual'})"
-                 )
-             except aioredis.RedisError as e:
-                 self.logger.warning(f"Cleanup failed: {e}")
-             finally:
-                 await self._close_redis()
-
-     async def _close_redis(self) -> None:
-         """Safely close the Redis connection."""
-         try:
-             await self.redis.close()
-             await self.redis.connection_pool.disconnect()
-         except Exception as e:
-             self.logger.warning(f"Redis close error: {e}")
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from typing import Optional
+ import aioredis
+ from crawlo import Request
+ from crawlo.filters import BaseFilter
+ from crawlo.utils.log import get_logger
+ from crawlo.utils.request import request_fingerprint
+
+
+ class AioRedisFilter(BaseFilter):
+     """Asynchronous request de-duplication filter backed by a Redis set (supports distributed crawlers), with TTL and cleanup control."""
+
+     def __init__(
+         self,
+         redis_key: str,
+         client: aioredis.Redis,
+         stats: dict,
+         debug: bool,
+         log_level: str,
+         cleanup_fp: bool = False,
+         ttl: Optional[int] = None  # None means keep fingerprints forever; > 0 is the expiry time in seconds
+     ):
+         """Initialise the filter."""
+         self.logger = get_logger(self.__class__.__name__, log_level)
+         super().__init__(self.logger, stats, debug)
+
+         self.redis_key = redis_key
+         self.redis = client
+         self.cleanup_fp = cleanup_fp
+         self.ttl = ttl
+
+     @classmethod
+     def create_instance(cls, crawler) -> 'BaseFilter':
+         """Create a filter instance from crawler settings."""
+         redis_url = crawler.settings.get('REDIS_URL', 'redis://localhost:6379')
+         decode_responses = crawler.settings.get_bool('DECODE_RESPONSES', False)
+         ttl_setting = crawler.settings.get_int('REDIS_TTL')
+
+         # Normalise the TTL setting
+         ttl = None
+         if ttl_setting is not None:
+             ttl = max(0, int(ttl_setting)) if ttl_setting > 0 else None
+
+         try:
+             redis_client = aioredis.from_url(
+                 redis_url,
+                 decode_responses=decode_responses,
+                 max_connections=20,
+                 encoding='utf-8'
+             )
+         except Exception as e:
+             raise RuntimeError(f"Failed to connect to Redis: {redis_url} - {str(e)}")
+
+         return cls(
+             redis_key=f"{crawler.settings.get('PROJECT_NAME', 'default')}:{crawler.settings.get('REDIS_KEY', 'request_fingerprints')}",
+             client=redis_client,
+             stats=crawler.stats,
+             cleanup_fp=crawler.settings.get_bool('CLEANUP_FP', False),
+             ttl=ttl,
+             debug=crawler.settings.get_bool('FILTER_DEBUG', False),
+             log_level=crawler.settings.get('LOG_LEVEL', 'INFO')
+         )
+
+     async def requested(self, request: Request) -> bool:
+         """Check whether the request has already been seen."""
+         try:
+             fp = str(request_fingerprint(request))
+
+             # 1. Check whether the fingerprint already exists
+             pipe = self.redis.pipeline()
+             pipe.sismember(self.redis_key, fp)  # queued, not awaited individually
+             exists = (await pipe.execute())[0]  # execute and read the result
+
+             if exists:  # already seen: duplicate request
+                 return True
+
+             # 2. Otherwise add the fingerprint and set the TTL
+             pipe = self.redis.pipeline()
+             pipe.sadd(self.redis_key, fp)  # queued, not awaited individually
+             if self.ttl and self.ttl > 0:
+                 pipe.expire(self.redis_key, self.ttl)  # queued, not awaited individually
+             await pipe.execute()  # run all queued commands at once
+
+             return False  # new request
+
+         except Exception as e:
+             self.logger.error(f"Request check failed: {getattr(request, 'url', 'unknown URL')}")
+             raise
+
+     async def add_fingerprint(self, fp: str) -> bool:
+         """Add a new fingerprint to the Redis set."""
+         try:
+             fp = str(fp)
+             added = await self.redis.sadd(self.redis_key, fp)
+
+             if self.ttl and self.ttl > 0:
+                 await self.redis.expire(self.redis_key, self.ttl)
+
+             return added == 1
+         except Exception as e:
+             self.logger.error("Failed to add fingerprint")
+             raise
+
+     async def get_stats(self) -> dict:
+         """Return filter statistics."""
+         try:
+             count = await self.redis.scard(self.redis_key)
+             stats = {
+                 'fingerprint_count': count,
+                 'redis_key': self.redis_key,
+                 'ttl': f"{self.ttl}s" if self.ttl else "persistent"
+             }
+             stats.update(self.stats)
+             return stats
+         except Exception as e:
+             self.logger.error("Failed to fetch filter statistics")
+             return self.stats
+
+     async def clear_all(self) -> int:
+         """Delete all fingerprint data."""
+         try:
+             deleted = await self.redis.delete(self.redis_key)
+             self.logger.info(f"Cleared fingerprints: {deleted}")
+             return deleted
+         except Exception as e:
+             self.logger.error("Failed to clear fingerprints")
+             raise
+
+     async def closed(self, reason: Optional[str] = None) -> None:
+         """Cleanup when the spider closes."""
+         try:
+             if self.cleanup_fp:
+                 deleted = await self.redis.delete(self.redis_key)
+                 self.logger.info(f"Spider close cleanup: deleted {deleted} fingerprints")
+             else:
+                 count = await self.redis.scard(self.redis_key)
+                 ttl_info = f"{self.ttl}s" if self.ttl else "persistent"
+                 self.logger.info(f"Fingerprints retained: {count} (TTL: {ttl_info})")
+         finally:
+             await self._close_redis()
+
+     async def _close_redis(self) -> None:
+         """Safely close the Redis connection."""
+         try:
+             if hasattr(self.redis, 'close'):
+                 await self.redis.close()
+             self.logger.debug("Redis connection closed")
+         except Exception as e:
+             self.logger.warning(f"Error while closing Redis: {e}")
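For reference, the rewritten filter is configured entirely through the settings read in create_instance() above. The sketch below lists those keys with illustrative values; the defaults shown are the fallbacks used in the code, and the setting that selects the filter class itself is not part of this diff, so no such key is shown.

# Settings sketch for AioRedisFilter in 1.0.4 -- illustrative values only
PROJECT_NAME = 'baidu_spider'           # used as the fingerprint key prefix
LOG_LEVEL = 'INFO'

REDIS_URL = 'redis://localhost:6379'    # passed to aioredis.from_url()
DECODE_RESPONSES = False                # default flipped from True (1.0.2) to False (1.0.4)
REDIS_KEY = 'request_fingerprints'      # fingerprint set key becomes "<PROJECT_NAME>:<REDIS_KEY>"
REDIS_TTL = 0                           # new in 1.0.4: 0 or unset keeps fingerprints forever, >0 expires the set after that many seconds
CLEANUP_FP = False                      # delete the fingerprint set when the spider closes
FILTER_DEBUG = False                    # log each filtered duplicate request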