crawlo-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo might be problematic.

Files changed (59)
  1. crawlo/__init__.py +5 -0
  2. crawlo/__version__.py +2 -0
  3. crawlo/core/__init__.py +2 -0
  4. crawlo/core/engine.py +157 -0
  5. crawlo/core/processor.py +40 -0
  6. crawlo/core/scheduler.py +35 -0
  7. crawlo/crawler.py +107 -0
  8. crawlo/downloader/__init__.py +78 -0
  9. crawlo/downloader/aiohttp_downloader.py +96 -0
  10. crawlo/downloader/httpx_downloader.py +48 -0
  11. crawlo/event.py +11 -0
  12. crawlo/exceptions.py +64 -0
  13. crawlo/extension/__init__.py +31 -0
  14. crawlo/extension/log_interval.py +49 -0
  15. crawlo/extension/log_stats.py +44 -0
  16. crawlo/items/__init__.py +24 -0
  17. crawlo/items/items.py +88 -0
  18. crawlo/middleware/__init__.py +21 -0
  19. crawlo/middleware/default_header.py +32 -0
  20. crawlo/middleware/download_delay.py +28 -0
  21. crawlo/middleware/middleware_manager.py +140 -0
  22. crawlo/middleware/request_ignore.py +30 -0
  23. crawlo/middleware/response_code.py +19 -0
  24. crawlo/middleware/response_filter.py +26 -0
  25. crawlo/middleware/retry.py +84 -0
  26. crawlo/network/__init__.py +7 -0
  27. crawlo/network/request.py +52 -0
  28. crawlo/network/response.py +93 -0
  29. crawlo/pipelines/__init__.py +13 -0
  30. crawlo/pipelines/console_pipeline.py +20 -0
  31. crawlo/pipelines/mongo_pipeline.py +5 -0
  32. crawlo/pipelines/mysql_pipeline.py +5 -0
  33. crawlo/pipelines/pipeline_manager.py +56 -0
  34. crawlo/settings/__init__.py +7 -0
  35. crawlo/settings/default_settings.py +39 -0
  36. crawlo/settings/setting_manager.py +100 -0
  37. crawlo/spider/__init__.py +36 -0
  38. crawlo/stats_collector.py +47 -0
  39. crawlo/subscriber.py +27 -0
  40. crawlo/task_manager.py +27 -0
  41. crawlo/templates/item_template.tmpl +22 -0
  42. crawlo/templates/project_template/items/__init__.py +0 -0
  43. crawlo/templates/project_template/main.py +33 -0
  44. crawlo/templates/project_template/setting.py +190 -0
  45. crawlo/templates/project_template/spiders/__init__.py +0 -0
  46. crawlo/templates/spider_template.tmpl +31 -0
  47. crawlo/utils/__init__.py +7 -0
  48. crawlo/utils/date_tools.py +20 -0
  49. crawlo/utils/func_tools.py +22 -0
  50. crawlo/utils/log.py +39 -0
  51. crawlo/utils/pqueue.py +16 -0
  52. crawlo/utils/project.py +58 -0
  53. crawlo/utils/system.py +11 -0
  54. crawlo-1.0.0.dist-info/METADATA +36 -0
  55. crawlo-1.0.0.dist-info/RECORD +59 -0
  56. crawlo-1.0.0.dist-info/WHEEL +5 -0
  57. crawlo-1.0.0.dist-info/entry_points.txt +2 -0
  58. crawlo-1.0.0.dist-info/licenses/LICENSE +23 -0
  59. crawlo-1.0.0.dist-info/top_level.txt +1 -0
crawlo/extension/log_interval.py ADDED
@@ -0,0 +1,49 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import asyncio
+
+ from crawlo.utils.log import get_logger
+ from crawlo.event import spider_opened, spider_closed
+
+
+ class LogIntervalExtension(object):
+
+     def __init__(self, crawler):
+         self.task = None
+         self.stats = crawler.stats
+         self.item_count = 0
+         self.response_count = 0
+         self.seconds = crawler.settings.get('INTERVAL')
+         self.interval = int(self.seconds / 60) if self.seconds % 60 == 0 else self.seconds
+         self.interval = "" if self.interval == 1 else self.interval
+         self.unit = 'min' if self.seconds % 60 == 0 else 's'
+
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(crawler)
+         crawler.subscriber.subscribe(o.spider_opened, event=spider_opened)
+         crawler.subscriber.subscribe(o.spider_closed, event=spider_closed)
+         return o
+
+     async def spider_opened(self):
+         self.task = asyncio.create_task(self.interval_log())
+         await self.task
+
+     async def spider_closed(self):
+         if self.task:
+             self.task.cancel()
+
+     async def interval_log(self):
+         while True:
+             last_item_count = self.stats.get_value('item_successful_count', default=0)
+             last_response_count = self.stats.get_value('response_received_count', default=0)
+             item_rate = last_item_count - self.item_count
+             response_rate = last_response_count - self.response_count
+             self.item_count, self.response_count = last_item_count, last_response_count
+             self.logger.info(
+                 f'Crawled {last_response_count} pages (at {response_rate} pages/{self.interval}{self.unit}),'
+                 f' Got {last_item_count} items (at {item_rate} items/{self.interval}{self.unit}).'
+             )
+             await asyncio.sleep(self.seconds)
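Note: LogIntervalExtension wires itself up through crawler.subscriber.subscribe and depends on the engine firing the spider_opened/spider_closed events. crawlo/subscriber.py (+27) is not reproduced in these hunks, so the following is only a minimal sketch of the subscribe/notify pattern the extensions rely on, not the shipped implementation.

import asyncio
from collections import defaultdict


class Subscriber:
    # Hypothetical sketch; the real crawlo/subscriber.py is not shown in this diff.

    def __init__(self):
        # event object -> list of async callbacks
        self._subscribers = defaultdict(list)

    def subscribe(self, callback, *, event):
        self._subscribers[event].append(callback)

    async def notify(self, event, *args, **kwargs):
        # fan the event out to every registered coroutine
        await asyncio.gather(*(cb(*args, **kwargs) for cb in self._subscribers[event]))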
crawlo/extension/log_stats.py ADDED
@@ -0,0 +1,44 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from crawlo import event
+ from crawlo.utils.date_tools import now, date_delta
+
+
+ class LogStats(object):
+
+     def __init__(self, stats):
+         self._stats = stats
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(crawler.stats)
+         crawler.subscriber.subscribe(o.spider_opened, event=event.spider_opened)
+         crawler.subscriber.subscribe(o.spider_closed, event=event.spider_closed)
+         crawler.subscriber.subscribe(o.item_successful, event=event.item_successful)
+         crawler.subscriber.subscribe(o.item_discard, event=event.item_discard)
+         crawler.subscriber.subscribe(o.response_received, event=event.response_received)
+         crawler.subscriber.subscribe(o.request_scheduled, event=event.request_scheduled)
+
+         return o
+
+     async def spider_opened(self):
+         self._stats['start_time'] = now()
+
+     async def spider_closed(self):
+         self._stats['end_time'] = now()
+         self._stats['cost_time(s)'] = date_delta(start=self._stats['start_time'], end=self._stats['end_time'])
+
+     async def item_successful(self, _item, _spider):
+         self._stats.inc_value('item_successful_count')
+
+     async def item_discard(self, _item, exc, _spider):
+         self._stats.inc_value('item_discard_count')
+         reason = exc.msg
+         if reason:
+             self._stats.inc_value(f"item_discard/{reason}")
+
+     async def response_received(self, _response, _spider):
+         self._stats.inc_value('response_received_count')
+
+     async def request_scheduled(self, _request, _spider):
+         self._stats.inc_value('request_scheduler_count')
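LogStats writes to crawler.stats both by item assignment (start_time, end_time, cost_time(s)) and through inc_value/get_value. crawlo/stats_collector.py (+47) is also not shown in these hunks; the sketch below only illustrates the collector surface these extensions call and is an assumption, not the packaged code.

from typing import Any, Dict


class StatsCollector:
    # Hypothetical sketch of the interface used by LogStats and LogIntervalExtension.

    def __init__(self):
        self._stats: Dict[str, Any] = {}

    def __setitem__(self, key: str, value: Any) -> None:
        self._stats[key] = value

    def __getitem__(self, key: str) -> Any:
        return self._stats[key]

    def get_value(self, key: str, default: Any = None) -> Any:
        return self._stats.get(key, default)

    def inc_value(self, key: str, count: int = 1, start: int = 0) -> None:
        # create the counter on first use, then increment it
        self._stats[key] = self._stats.get(key, start) + count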
crawlo/items/__init__.py ADDED
@@ -0,0 +1,24 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from abc import ABCMeta
+
+
+ class Field(dict):
+     pass
+
+
+ class ItemMeta(ABCMeta):
+     """
+     Metaclass that collects Field declarations into the FIELDS dict.
+     """
+     def __new__(mcs, name, bases, attrs):
+         field = {}
+         cls_attr = {}
+         for k, v in attrs.items():
+             if isinstance(v, Field):
+                 field[k] = v
+             else:
+                 cls_attr[k] = v
+         cls_instance = super().__new__(mcs, name, bases, attrs)
+         cls_instance.FIELDS = field
+         return cls_instance
crawlo/items/items.py ADDED
@@ -0,0 +1,88 @@
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ from copy import deepcopy
+ from pprint import pformat
+ from typing import Any, Iterator, Dict
+ from collections.abc import MutableMapping
+
+ from crawlo.items import ItemMeta, Field
+ from crawlo.exceptions import ItemInitError, ItemAttributeError
+
+
+ class Item(MutableMapping, metaclass=ItemMeta):
+     FIELDS: Dict[str, Any] = {}
+
+     def __init__(self, *args, **kwargs):
+         # _values must exist before __setitem__ is called for the kwargs below
+         self._values: Dict[str, Any] = {}
+         if args:
+             raise ItemInitError(
+                 f"{self.__class__.__name__} does not accept positional arguments: {args}; "
+                 f"use keyword arguments instead."
+             )
+         if kwargs:
+             for key, value in kwargs.items():
+                 self[key] = value
+
+     def __getitem__(self, item: str) -> Any:
+         return self._values[item]
+
+     def __setitem__(self, key: str, value: Any) -> None:
+         if key not in self.FIELDS:
+             raise KeyError(f"{self.__class__.__name__} has no field: {key}")
+         self._values[key] = value
+
+     def __delitem__(self, key: str) -> None:
+         del self._values[key]
+
+     def __setattr__(self, key: str, value: Any) -> None:
+         if not key.startswith("_"):
+             raise AttributeError(
+                 f"Use item[{key!r}] = {value!r} to set a field value"
+             )
+         super().__setattr__(key, value)
+
+     def __getattr__(self, item: str) -> Any:
+         # Triggered only when normal attribute lookup fails
+         raise AttributeError(
+             f"{self.__class__.__name__} has no field: {item}. "
+             f"Declare it on `{self.__class__.__name__}` first, then read it with item[{item!r}]."
+         )
+
+     def __getattribute__(self, item: str) -> Any:
+         # Attribute interceptor: runs on every attribute access
+         try:
+             field = super().__getattribute__("FIELDS")
+             if isinstance(field, dict) and item in field:
+                 raise ItemAttributeError(
+                     f"Use item[{item!r}] to read a field value"
+                 )
+         except AttributeError:
+             pass  # FIELDS is not defined yet; fall through to normal lookup
+         return super().__getattribute__(item)
+
+     def __repr__(self) -> str:
+         return pformat(dict(self))
+
+     __str__ = __repr__
+
+     def __iter__(self) -> Iterator[str]:
+         return iter(self._values)
+
+     def __len__(self) -> int:
+         return len(self._values)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return dict(self)
+
+     def copy(self) -> "Item":
+         return deepcopy(self)
+
+
+ if __name__ == '__main__':
+     class TestItem(Item):
+         url = Field()
+         title = Field()
+
+     test_item = TestItem()
+     test_item['title'] = 'Baidu homepage'
+     test_item['url'] = 'http://example.com'
+     # test_item.title = 'fffff'  # would raise AttributeError: use item['title'] instead
+     print(test_item['title'])  # attribute access (test_item.title) raises ItemAttributeError by design
crawlo/middleware/__init__.py ADDED
@@ -0,0 +1,21 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from crawlo import Request, Response
+
+
+ class BaseMiddleware(object):
+     def process_request(self, request, spider) -> None | Request | Response:
+         # request pre-processing
+         pass
+
+     def process_response(self, request, response, spider) -> Request | Response:
+         # response pre-processing
+         pass
+
+     def process_exception(self, request, exp, spider) -> None | Request | Response:
+         # exception pre-processing
+         pass
+
+     @classmethod
+     def create_instance(cls, crawler):
+         return cls()
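A project-level middleware only needs to subclass BaseMiddleware (or expose a create_instance classmethod) and override the hooks it cares about. The example below is hypothetical; the PROXY_URL setting name is illustrative and not part of crawlo's defaults.

from crawlo.middleware import BaseMiddleware
from crawlo.exceptions import NotConfiguredError


class ProxyMiddleware(BaseMiddleware):
    # Hypothetical example; PROXY_URL is an assumed project setting.

    def __init__(self, proxy_url):
        self.proxy_url = proxy_url

    @classmethod
    def create_instance(cls, crawler):
        proxy_url = crawler.settings.get('PROXY_URL')
        if not proxy_url:
            # raising NotConfiguredError makes MiddlewareManager silently skip this middleware
            raise NotConfiguredError
        return cls(proxy_url)

    def process_request(self, request, spider):
        # returning None lets the request continue down the middleware chain
        request.set_meta('proxy', self.proxy_url)
        return None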
crawlo/middleware/default_header.py ADDED
@@ -0,0 +1,32 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from crawlo.event import spider_opened
+
+
+ class DefaultHeaderMiddleware(object):
+
+     def __init__(self, user_agent, headers, spider):
+         self.user_agent = user_agent
+         self.headers = headers
+         self.spider = spider
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(
+             user_agent=crawler.settings.get('USER_AGENT'),
+             headers=crawler.settings.get_dict('DEFAULT_HEADERS'),
+             spider=crawler.spider
+         )
+         crawler.subscriber.subscribe(o.spider_opened, event=spider_opened)
+         return o
+
+     async def spider_opened(self):
+         self.user_agent = getattr(self.spider, 'user_agent', self.user_agent)
+         self.headers = getattr(self.spider, 'headers', self.headers)
+         if self.user_agent:
+             self.headers.setdefault('User-Agent', self.user_agent)
+
+     def process_request(self, request, _spider):
+         if self.headers:
+             for key, value in self.headers.items():
+                 request.headers.setdefault(key, value)
crawlo/middleware/download_delay.py ADDED
@@ -0,0 +1,28 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from asyncio import sleep
+ from random import uniform
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import NotConfiguredError
+
+
+ class DownloadDelayMiddleware(object):
+
+     def __init__(self, settings, log_level):
+         self.delay = settings.get_float("DOWNLOAD_DELAY")
+         if not self.delay:
+             raise NotConfiguredError
+         self.randomness = settings.get_bool("RANDOMNESS")
+         self.floor, self.upper = settings.get_list("RANDOM_RANGE")
+         self.logger = get_logger(self.__class__.__name__, log_level)
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(settings=crawler.settings, log_level=crawler.settings.get('LOG_LEVEL'))
+         return o
+
+     async def process_request(self, _request, _spider):
+         if self.randomness:
+             await sleep(uniform(self.delay * self.floor, self.delay * self.upper))
+         else:
+             await sleep(self.delay)
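DownloadDelayMiddleware is driven entirely by settings: if DOWNLOAD_DELAY is unset or zero it raises NotConfiguredError and is dropped by the middleware manager. The key names below come from the code above; the values are examples only.

DOWNLOAD_DELAY = 1.5        # base delay in seconds; 0/unset disables the middleware
RANDOMNESS = True           # jitter the delay instead of sleeping a fixed time
RANDOM_RANGE = [0.5, 1.5]   # sleep is drawn from uniform(DOWNLOAD_DELAY * 0.5, DOWNLOAD_DELAY * 1.5)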
crawlo/middleware/middleware_manager.py ADDED
@@ -0,0 +1,140 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from pprint import pformat
+ from types import MethodType
+ from asyncio import create_task
+ from collections import defaultdict
+ from typing import List, Dict, Callable, Optional
+
+ from crawlo import Request, Response
+ from crawlo.utils.log import get_logger
+ from crawlo.utils.project import load_class
+ from crawlo.middleware import BaseMiddleware
+ from crawlo.utils.project import common_call
+ from crawlo.event import ignore_request, response_received
+ from crawlo.exceptions import MiddlewareInitError, InvalidOutputError, RequestMethodError, IgnoreRequestError, \
+     NotConfiguredError
+
+
+ class MiddlewareManager:
+
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self.logger = get_logger(self.__class__.__name__, crawler.settings.get('LOG_LEVEL'))
+         self.middlewares: List = []
+         self.methods: Dict[str, List[MethodType]] = defaultdict(list)
+         middlewares = self.crawler.settings.get_list('MIDDLEWARES')
+         self._add_middleware(middlewares)
+         self._add_method()
+
+         self.download_method: Callable = crawler.engine.downloader.download
+         self._stats = crawler.stats
+
+     async def _process_request(self, request: Request):
+         for method in self.methods['process_request']:
+             result = await common_call(method, request, self.crawler.spider)
+             if result is None:
+                 continue
+             if isinstance(result, (Request, Response)):
+                 return result
+             raise InvalidOutputError(
+                 f"{method.__self__.__class__.__name__} must return None, Request or Response, got {type(result).__name__}"
+             )
+         return await self.download_method(request)
+
+     async def _process_response(self, request: Request, response: Response):
+         for method in reversed(self.methods['process_response']):
+             try:
+                 response = await common_call(method, request, response, self.crawler.spider)
+             except IgnoreRequestError as exp:
+                 create_task(self.crawler.subscriber.notify(ignore_request, exp, request, self.crawler.spider))
+                 # self.logger.info(f'{request} ignored.')
+                 # self._stats.inc_value('request_ignore_count')
+                 # reason = exp.msg
+                 # if reason:
+                 #     self._stats.inc_value(f'request_ignore_count/{reason}')
+             if isinstance(response, Request):
+                 return response
+             if isinstance(response, Response):
+                 continue
+             raise InvalidOutputError(
+                 f"{method.__self__.__class__.__name__} must return Request or Response, got {type(response).__name__}"
+             )
+         return response
+
+     async def _process_exception(self, request: Request, exp: Exception):
+         for method in self.methods['process_exception']:
+             response = await common_call(method, request, exp, self.crawler.spider)
+             if response is None:
+                 continue
+             if isinstance(response, (Request, Response)):
+                 return response
+             if response:
+                 break
+             raise InvalidOutputError(
+                 f"{method.__self__.__class__.__name__} must return None, Request or Response, got {type(response).__name__}"
+             )
+         else:
+             raise exp
+
+     async def download(self, request) -> Optional[Response]:
+         """ called in the download method. """
+         try:
+             response = await self._process_request(request)
+         except KeyError:
+             raise RequestMethodError(f"{request.method.lower()} is not supported")
+         except IgnoreRequestError as exp:
+             create_task(self.crawler.subscriber.notify(ignore_request, exp, request, self.crawler.spider))
+             response = await self._process_exception(request, exp)
+         except Exception as exp:
+             self._stats.inc_value(f'download_error/{exp.__class__.__name__}')
+             response = await self._process_exception(request, exp)
+         else:
+             create_task(self.crawler.subscriber.notify(response_received, response, self.crawler.spider))
+             # self.crawler.stats.inc_value('response_received_count')
+         if isinstance(response, Response):
+             response = await self._process_response(request, response)
+         if isinstance(response, Request):
+             # schedule the (possibly new) request produced by the middleware chain
+             await self.crawler.engine.enqueue_request(response)
+             return None
+         return response
+
+     @classmethod
+     def create_instance(cls, *args, **kwargs):
+         return cls(*args, **kwargs)
+
+     def _add_middleware(self, middlewares):
+         enabled_middlewares = [m for m in middlewares if self._validate_middleware(m)]
+         if enabled_middlewares:
+             self.logger.info(f'enabled middleware:\n {pformat(enabled_middlewares)}')
+
+     def _validate_middleware(self, middleware):
+         middleware_cls = load_class(middleware)
+         if not hasattr(middleware_cls, 'create_instance'):
+             raise MiddlewareInitError(
+                 "Middleware init failed, must inherit from `BaseMiddleware` or have a `create_instance` method"
+             )
+         try:
+             instance = middleware_cls.create_instance(self.crawler)
+             self.middlewares.append(instance)
+             return True
+         except NotConfiguredError:
+             return False
+
+     def _add_method(self):
+         for middleware in self.middlewares:
+             if hasattr(middleware, 'process_request'):
+                 if self._validate_middleware_method(method_name='process_request', middleware=middleware):
+                     self.methods['process_request'].append(middleware.process_request)
+             if hasattr(middleware, 'process_response'):
+                 if self._validate_middleware_method(method_name='process_response', middleware=middleware):
+                     self.methods['process_response'].append(middleware.process_response)
+             if hasattr(middleware, 'process_exception'):
+                 if self._validate_middleware_method(method_name='process_exception', middleware=middleware):
+                     self.methods['process_exception'].append(middleware.process_exception)
+
+     @staticmethod
+     def _validate_middleware_method(method_name, middleware) -> bool:
+         method = getattr(type(middleware), method_name)
+         base_method = getattr(BaseMiddleware, method_name)
+         return False if method == base_method else True
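MiddlewareManager builds its chain from the MIDDLEWARES setting and instantiates every entry via load_class followed by create_instance(crawler). A plausible configuration is shown below, assuming load_class resolves dotted "module.ClassName" strings (crawlo/utils/project.py is not included in these hunks):

MIDDLEWARES = [
    'crawlo.middleware.default_header.DefaultHeaderMiddleware',
    'crawlo.middleware.download_delay.DownloadDelayMiddleware',
    'crawlo.middleware.retry.RetryMiddleware',
    'crawlo.middleware.response_filter.ResponseFilterMiddleware',
    'crawlo.middleware.response_code.ResponseCodeMiddleware',
    'crawlo.middleware.request_ignore.RequestIgnoreMiddleware',
]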
crawlo/middleware/request_ignore.py ADDED
@@ -0,0 +1,30 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import IgnoreRequestError
+ from crawlo.event import ignore_request
+
+
+ class RequestIgnoreMiddleware(object):
+
+     def __init__(self, stats, log_level):
+         self.logger = get_logger(self.__class__.__name__, log_level)
+         self.stats = stats
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(stats=crawler.stats, log_level=crawler.settings.get('LOG_LEVEL'))
+         crawler.subscriber.subscribe(o.request_ignore, event=ignore_request)
+         return o
+
+     async def request_ignore(self, exc, request, _spider):
+         self.logger.info(f'{request} ignored.')
+         self.stats.inc_value('request_ignore_count')
+         reason = exc.msg
+         if reason:
+             self.stats.inc_value(f'request_ignore_count/{reason}')
+
+     @staticmethod
+     def process_exception(_request, exc, _spider):
+         if isinstance(exc, IgnoreRequestError):
+             return True
crawlo/middleware/response_code.py ADDED
@@ -0,0 +1,19 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from crawlo.utils.log import get_logger
+
+
+ class ResponseCodeMiddleware(object):
+     def __init__(self, stats, log_level):
+         self.logger = get_logger(self.__class__.__name__, log_level)
+         self.stats = stats
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(stats=crawler.stats, log_level=crawler.settings.get('LOG_LEVEL'))
+         return o
+
+     def process_response(self, request, response, spider):
+         self.stats.inc_value(f'stats_code/count/{response.status_code}')
+         self.logger.debug(f'Got response from <{response.status_code} {response.url}>')
+         return response
crawlo/middleware/response_filter.py ADDED
@@ -0,0 +1,26 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import IgnoreRequestError
+
+
+ class ResponseFilterMiddleware:
+
+     def __init__(self, allowed_codes, log_level):
+         self.allowed_codes = allowed_codes
+         self.logger = get_logger(self.__class__.__name__, log_level)
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(
+             allowed_codes=crawler.settings.get_list('ALLOWED_CODES'),
+             log_level=crawler.settings.get('LOG_LEVEL')
+         )
+         return o
+
+     def process_response(self, request, response, spider):
+         if 200 <= response.status_code < 300:
+             return response
+         if response.status_code in self.allowed_codes:
+             return response
+         raise IgnoreRequestError("response status_code/non-200")
crawlo/middleware/retry.py ADDED
@@ -0,0 +1,84 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from typing import List
+ from anyio import EndOfStream
+ from httpcore import ReadError
+ from asyncio.exceptions import TimeoutError
+ from httpx import RemoteProtocolError, ConnectError, ReadTimeout
+ from aiohttp.client_exceptions import ClientConnectionError, ClientPayloadError
+ from aiohttp import ClientConnectorError, ClientTimeout, ClientConnectorSSLError, ClientResponseError
+
+ from crawlo.utils.log import get_logger
+ from crawlo.stats_collector import StatsCollector
+
+ _retry_exceptions = [
+     EndOfStream,
+     ReadError,
+     TimeoutError,
+     ConnectError,
+     ReadTimeout,
+     ClientConnectorError,
+     ClientResponseError,
+     RemoteProtocolError,
+     ClientTimeout,
+     ClientConnectorSSLError,
+     ClientPayloadError,
+     ClientConnectionError
+ ]
+
+
+ class RetryMiddleware(object):
+
+     def __init__(
+             self,
+             *,
+             retry_http_codes: List,
+             ignore_http_codes: List,
+             max_retry_times: int,
+             retry_exceptions: List,
+             stats: StatsCollector
+     ):
+         self.retry_http_codes = retry_http_codes
+         self.ignore_http_codes = ignore_http_codes
+         self.max_retry_times = max_retry_times
+         self.retry_exceptions = tuple(retry_exceptions + _retry_exceptions)
+         self.stats = stats
+         self.logger = get_logger(self.__class__.__name__)
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(
+             retry_http_codes=crawler.settings.get_list('RETRY_HTTP_CODES'),
+             ignore_http_codes=crawler.settings.get_list('IGNORE_HTTP_CODES'),
+             max_retry_times=crawler.settings.get_int('MAX_RETRY_TIMES'),
+             retry_exceptions=crawler.settings.get_list('RETRY_EXCEPTIONS'),
+             stats=crawler.stats
+         )
+         return o
+
+     def process_response(self, request, response, spider):
+         if request.meta.get('dont_retry', False):
+             return response
+         if response.status_code in self.ignore_http_codes:
+             return response
+         if response.status_code in self.retry_http_codes:
+             # retry on configured status codes
+             reason = f"response code {response.status_code}"
+             return self._retry(request, reason, spider) or response
+         return response
+
+     def process_exception(self, request, exc, spider):
+         if isinstance(exc, self.retry_exceptions) and not request.meta.get('dont_retry', False):
+             return self._retry(request=request, reason=type(exc).__name__, spider=spider)
+
+     def _retry(self, request, reason, spider):
+         retry_times = request.meta.get('retry_times', 0)
+         if retry_times < self.max_retry_times:
+             retry_times += 1
+             self.logger.info(f"{spider} {request} {reason}, retrying ({retry_times}/{self.max_retry_times})...")
+             request.meta['retry_times'] = retry_times
+             self.stats.inc_value("retry_count")
+             return request
+         else:
+             self.logger.warning(f"{spider} {request} {reason}: reached max retries ({self.max_retry_times}), giving up.")
+             return None
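RetryMiddleware reads four settings in create_instance. The key names below are taken from the code; the values are illustrative only.

RETRY_HTTP_CODES = [408, 429, 500, 502, 503, 504]   # retry on these status codes
IGNORE_HTTP_CODES = [404]                            # pass these responses through without retrying
MAX_RETRY_TIMES = 3                                  # per-request cap tracked in request.meta['retry_times']
RETRY_EXCEPTIONS = []                                # extra exception classes merged with the built-in list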
crawlo/network/__init__.py ADDED
@@ -0,0 +1,7 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ # @Time   : 2025-02-05 14:07
+ # @Author : oscar
+ # @Desc   : None
+ """
crawlo/network/request.py ADDED
@@ -0,0 +1,52 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ import hashlib
+ from copy import deepcopy
+ from typing import Dict, Optional, Callable
+
+
+ class Request(object):
+
+     def __init__(
+             self,
+             url: str,
+             *,
+             callback: Optional[Callable] = None,
+             headers: Optional[Dict[str, str]] = None,
+             body: Optional[bytes] = None,
+             method: Optional[str] = 'GET',
+             cookies: Optional[Dict[str, str]] = None,
+             priority: int = 0,
+             encoding: Optional[str] = 'UTF-8',
+             meta: Optional[Dict[str, str]] = None
+     ):
+         self.url = url
+         self.callback = callback
+         self.headers = headers if headers else {}
+         self.body = body
+         self.method = method
+         self.cookies = cookies
+         self.priority = priority
+         self.encoding = encoding
+         self._meta = meta if meta is not None else {}
+
+     def copy(self):
+         return deepcopy(self)
+
+     def fingerprint(self) -> str:
+         data = f"{self.url}{self.method}{self.body or b''}".encode()
+         return hashlib.sha256(data).hexdigest()
+
+     def set_meta(self, key: str, value: str):
+         self._meta[key] = value
+
+     @property
+     def meta(self):
+         return self._meta
+
+     def __str__(self):
+         return f'<Request url={self.url} method={self.method}>'
+
+     def __lt__(self, other):
+         return self.priority < other.priority
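A brief usage sketch of the Request API defined above; the URL and parse callback are placeholders.

def parse(response):
    ...


req = Request(
    'https://example.com/page/1',
    callback=parse,
    headers={'Accept': 'text/html'},
    priority=10,
)
req.set_meta('dont_retry', 'yes')   # meta is exposed as a read-only property and written via set_meta
print(req)                          # <Request url=https://example.com/page/1 method=GET>
print(req.fingerprint())            # sha256 hex digest over url + method + body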