crawlo 1.0.4__py3-none-any.whl → 1.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crawlo may be problematic (see the registry's advisory for details).

Files changed (95)
  1. crawlo/__init__.py +25 -9
  2. crawlo/__version__.py +1 -1
  3. crawlo/core/__init__.py +2 -2
  4. crawlo/core/engine.py +158 -158
  5. crawlo/core/processor.py +40 -40
  6. crawlo/core/scheduler.py +57 -57
  7. crawlo/crawler.py +424 -242
  8. crawlo/downloader/__init__.py +78 -78
  9. crawlo/downloader/aiohttp_downloader.py +200 -259
  10. crawlo/downloader/cffi_downloader.py +277 -0
  11. crawlo/downloader/httpx_downloader.py +246 -187
  12. crawlo/event.py +11 -11
  13. crawlo/exceptions.py +73 -64
  14. crawlo/extension/__init__.py +31 -31
  15. crawlo/extension/log_interval.py +49 -49
  16. crawlo/extension/log_stats.py +44 -44
  17. crawlo/extension/logging_extension.py +35 -0
  18. crawlo/filters/__init__.py +37 -37
  19. crawlo/filters/aioredis_filter.py +150 -150
  20. crawlo/filters/memory_filter.py +202 -202
  21. crawlo/items/__init__.py +62 -62
  22. crawlo/items/items.py +115 -119
  23. crawlo/middleware/__init__.py +21 -21
  24. crawlo/middleware/default_header.py +32 -32
  25. crawlo/middleware/download_delay.py +28 -28
  26. crawlo/middleware/middleware_manager.py +135 -140
  27. crawlo/middleware/proxy.py +246 -0
  28. crawlo/middleware/request_ignore.py +30 -30
  29. crawlo/middleware/response_code.py +18 -18
  30. crawlo/middleware/response_filter.py +26 -26
  31. crawlo/middleware/retry.py +90 -90
  32. crawlo/network/__init__.py +7 -7
  33. crawlo/network/request.py +203 -204
  34. crawlo/network/response.py +166 -166
  35. crawlo/pipelines/__init__.py +13 -13
  36. crawlo/pipelines/console_pipeline.py +39 -39
  37. crawlo/pipelines/mongo_pipeline.py +116 -116
  38. crawlo/pipelines/mysql_batch_pipline.py +273 -134
  39. crawlo/pipelines/mysql_pipeline.py +195 -195
  40. crawlo/pipelines/pipeline_manager.py +56 -56
  41. crawlo/settings/__init__.py +7 -7
  42. crawlo/settings/default_settings.py +169 -94
  43. crawlo/settings/setting_manager.py +99 -99
  44. crawlo/spider/__init__.py +41 -36
  45. crawlo/stats_collector.py +59 -59
  46. crawlo/subscriber.py +106 -106
  47. crawlo/task_manager.py +27 -27
  48. crawlo/templates/item_template.tmpl +21 -21
  49. crawlo/templates/project_template/main.py +32 -32
  50. crawlo/templates/project_template/setting.py +189 -189
  51. crawlo/templates/spider_template.tmpl +30 -30
  52. crawlo/utils/__init__.py +7 -7
  53. crawlo/utils/concurrency_manager.py +124 -124
  54. crawlo/utils/date_tools.py +233 -177
  55. crawlo/utils/db_helper.py +344 -0
  56. crawlo/utils/func_tools.py +82 -82
  57. crawlo/utils/log.py +129 -39
  58. crawlo/utils/pqueue.py +173 -173
  59. crawlo/utils/project.py +59 -59
  60. crawlo/utils/request.py +267 -122
  61. crawlo/utils/system.py +11 -11
  62. crawlo/utils/tools.py +5 -303
  63. crawlo/utils/url.py +39 -39
  64. {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/METADATA +49 -48
  65. crawlo-1.0.5.dist-info/RECORD +84 -0
  66. {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/top_level.txt +1 -0
  67. examples/__init__.py +0 -0
  68. examples/gxb/__init__.py +0 -0
  69. examples/gxb/items.py +36 -0
  70. examples/gxb/run.py +15 -0
  71. examples/gxb/settings.py +71 -0
  72. examples/gxb/spider/__init__.py +0 -0
  73. examples/gxb/spider/miit_spider.py +180 -0
  74. examples/gxb/spider/telecom_device_licenses.py +129 -0
  75. tests/__init__.py +7 -7
  76. tests/test_proxy_health_check.py +33 -0
  77. tests/test_proxy_middleware_integration.py +137 -0
  78. tests/test_proxy_providers.py +57 -0
  79. tests/test_proxy_stats.py +20 -0
  80. tests/test_proxy_strategies.py +60 -0
  81. crawlo/downloader/playwright_downloader.py +0 -161
  82. crawlo-1.0.4.dist-info/RECORD +0 -79
  83. tests/baidu_spider/__init__.py +0 -7
  84. tests/baidu_spider/demo.py +0 -94
  85. tests/baidu_spider/items.py +0 -25
  86. tests/baidu_spider/middleware.py +0 -49
  87. tests/baidu_spider/pipeline.py +0 -55
  88. tests/baidu_spider/request_fingerprints.txt +0 -9
  89. tests/baidu_spider/run.py +0 -27
  90. tests/baidu_spider/settings.py +0 -80
  91. tests/baidu_spider/spiders/__init__.py +0 -7
  92. tests/baidu_spider/spiders/bai_du.py +0 -61
  93. tests/baidu_spider/spiders/sina.py +0 -79
  94. {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/WHEEL +0 -0
  95. {crawlo-1.0.4.dist-info → crawlo-1.0.5.dist-info}/entry_points.txt +0 -0
crawlo/items/items.py CHANGED
@@ -1,119 +1,115 @@
- #!/usr/bin/python
- # -*- coding: UTF-8 -*-
- from copy import deepcopy
- from pprint import pformat
- from typing import Any, Iterator, Dict
- from collections.abc import MutableMapping
-
- from crawlo.items import ItemMeta, Field
- from crawlo.exceptions import ItemInitError, ItemAttributeError
-
-
- class Item(MutableMapping, metaclass=ItemMeta):
-     FIELDS: Dict[str, Any] = {}
-
-     def __init__(self, *args, **kwargs):
-         if args:
-             raise ItemInitError(f"{self.__class__.__name__} does not accept positional arguments: {args}; initialize with keyword arguments instead.")
-         if kwargs:
-             for key, value in kwargs.items():
-                 self[key] = value
-
-         self._values: Dict[str, Any] = {}
-
-         # Initialize fields, filling in defaults
-         for field_name, field_obj in self.FIELDS.items():
-             if field_obj.default is not None:
-                 self._values[field_name] = field_obj.default
-
-         # Override defaults or set new values
-         for key, value in kwargs.items():
-             self[key] = value
-
-     def __getitem__(self, item: str) -> Any:
-         return self._values[item]
-
-     # def __setitem__(self, key: str, value: Any) -> None:
-     #     if key not in self.FIELDS:
-     #         raise KeyError(f"{self.__class__.__name__} does not contain field: {key}")
-     #     self._values[key] = value
-
-     def __setitem__(self, key: str, value: Any) -> None:
-         if key not in self.FIELDS:
-             raise KeyError(f"{self.__class__.__name__} does not contain field: {key}")
-
-         field = self.FIELDS[key]
-         try:
-             validated_value = field.validate(value, field_name=key)
-             self._values[key] = validated_value
-         except Exception as e:
-             error_lines = [
-                 "",
-                 "[Field validation failed]",
-                 f"Field name: {key}",
-                 f"Value type: {type(value)}",
-                 f"Raw value: {repr(value)}",
-                 f"Nullable: {field.nullable}",
-                 f"Reason: {str(e)}",
-                 ""
-             ]
-             detailed_error = "\n".join(error_lines)
-             raise type(e)(detailed_error) from e
-
-     def __delitem__(self, key: str) -> None:
-         del self._values[key]
-
-     def __setattr__(self, key: str, value: Any) -> None:
-         if not key.startswith("_"):
-             raise AttributeError(
-                 f"Use item[{key!r}] = {value!r} to set field values"
-             )
-         super().__setattr__(key, value)
-
-     def __getattr__(self, item: str) -> Any:
-         # Triggered when normal attribute lookup fails
-         raise AttributeError(
-             f"{self.__class__.__name__} does not support field: {item}. "
-             f"Declare the field on `{self.__class__.__name__}` first, then read it via item[{item!r}]."
-         )
-
-     def __getattribute__(self, item: str) -> Any:
-         # Attribute interceptor: every attribute access passes through here
-         try:
-             field = super().__getattribute__("FIELDS")
-             if isinstance(field, dict) and item in field:
-                 raise ItemAttributeError(
-                     f"Use item[{item!r}] to read field values"
-                 )
-         except AttributeError:
-             pass  # FIELDS not defined yet; continue with normal lookup
-         return super().__getattribute__(item)
-
-     def __repr__(self) -> str:
-         return pformat(dict(self))
-
-     __str__ = __repr__
-
-     def __iter__(self) -> Iterator[str]:
-         return iter(self._values)
-
-     def __len__(self) -> int:
-         return len(self._values)
-
-     def to_dict(self) -> Dict[str, Any]:
-         return dict(self)
-
-     def copy(self) -> "Item":
-         return deepcopy(self)
-
-
- if __name__ == '__main__':
-     class TestItem(Item):
-         url = Field(nullable=False, field_type=str, max_length=100)
-         title = Field(default="Untitled", field_type=str)
-
-     test_item = TestItem()
-     test_item['title'] = 'Baidu homepage'
-     test_item['url'] = 'hhh'
-     # test_item.title = 'fffff'
-     print(test_item)
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ from copy import deepcopy
+ from pprint import pformat
+ from typing import Any, Iterator, Dict
+ from collections.abc import MutableMapping
+
+ from crawlo.items import ItemMeta, Field
+ from crawlo.exceptions import ItemInitError, ItemAttributeError
+
+
+ class Item(MutableMapping, metaclass=ItemMeta):
+     FIELDS: Dict[str, Any] = {}
+
+     def __init__(self, *args, **kwargs):
+         if args:
+             raise ItemInitError(f"{self.__class__.__name__} does not accept positional arguments: {args}; initialize with keyword arguments instead.")
+         if kwargs:
+             for key, value in kwargs.items():
+                 self[key] = value
+
+         self._values: Dict[str, Any] = {}
+
+         # Initialize fields, filling in defaults
+         for field_name, field_obj in self.FIELDS.items():
+             if field_obj.default is not None:
+                 self._values[field_name] = field_obj.default
+
+         # Override defaults or set new values
+         for key, value in kwargs.items():
+             self[key] = value
+
+     def __getitem__(self, item: str) -> Any:
+         return self._values[item]
+
+     def __setitem__(self, key: str, value: Any) -> None:
+         if key not in self.FIELDS:
+             raise KeyError(f"{self.__class__.__name__} does not contain field: {key}")
+
+         field = self.FIELDS[key]
+         try:
+             validated_value = field.validate(value, field_name=key)
+             self._values[key] = validated_value
+         except Exception as e:
+             error_lines = [
+                 "",
+                 "[Field validation failed]",
+                 f"Field name: {key}",
+                 f"Value type: {type(value)}",
+                 f"Raw value: {repr(value)}",
+                 f"Nullable: {field.nullable}",
+                 f"Reason: {str(e)}",
+                 ""
+             ]
+             detailed_error = "\n".join(error_lines)
+             raise type(e)(detailed_error) from e
+
+     def __delitem__(self, key: str) -> None:
+         del self._values[key]
+
+     def __setattr__(self, key: str, value: Any) -> None:
+         if not key.startswith("_"):
+             raise AttributeError(
+                 f"Use item[{key!r}] = {value!r} to set field values"
+             )
+         super().__setattr__(key, value)
+
+     def __getattr__(self, item: str) -> Any:
+         # Triggered when normal attribute lookup fails
+         raise AttributeError(
+             f"{self.__class__.__name__} does not support field: {item}. "
+             f"Declare the field on `{self.__class__.__name__}` first, then read it via item[{item!r}]."
+         )
+
+     def __getattribute__(self, item: str) -> Any:
+         # Attribute interceptor: every attribute access passes through here
+         try:
+             field = super().__getattribute__("FIELDS")
+             if isinstance(field, dict) and item in field:
+                 raise ItemAttributeError(
+                     f"Use item[{item!r}] to read field values"
+                 )
+         except AttributeError:
+             pass  # FIELDS not defined yet; continue with normal lookup
+         return super().__getattribute__(item)
+
+     def __repr__(self) -> str:
+         return pformat(dict(self))
+
+     __str__ = __repr__
+
+     def __iter__(self) -> Iterator[str]:
+         return iter(self._values)
+
+     def __len__(self) -> int:
+         return len(self._values)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return dict(self)
+
+     def copy(self) -> "Item":
+         return deepcopy(self)
+
+
+ if __name__ == '__main__':
+     class TestItem(Item):
+         url = Field(nullable=False, field_type=str, max_length=100)
+         title = Field(default="Untitled", field_type=str)
+
+
+     test_item = TestItem()
+     test_item['title'] = 'Baidu homepage'
+     test_item['url'] = 'hhh'
+     # test_item.title = 'fffff'
+     print(test_item)
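The `Item` class above is a dict-like container whose declared `Field`s are collected into `FIELDS` by the `ItemMeta` metaclass; attribute-style reads and writes are deliberately blocked, and every `item[key] = value` goes through `Field.validate`. A minimal usage sketch, assuming only the `Field` options visible in the `__main__` block (`field_type`, `nullable`, `max_length`, `default`); note that because `__init__` applies kwargs before `self._values` exists, the sketch assigns fields after construction:

```python
from crawlo.items import Field
from crawlo.items.items import Item


class ArticleItem(Item):
    # Declared fields are gathered into ArticleItem.FIELDS by ItemMeta
    url = Field(nullable=False, field_type=str, max_length=200)
    title = Field(default="Untitled", field_type=str)


item = ArticleItem()                       # defaults are pre-filled: title == "Untitled"
item["url"] = "https://example.com/post"   # runs Field.validate before storing
item["title"] = "Hello, world"             # overrides the default

print(item.to_dict())  # {'url': 'https://example.com/post', 'title': 'Hello, world'}

# Attribute access is rejected by design:
# item.title           -> ItemAttributeError: use item['title']
# item.title = "oops"  -> AttributeError: use item['title'] = "oops"
```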
crawlo/middleware/__init__.py CHANGED
@@ -1,21 +1,21 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from crawlo import Request, Response
-
-
- class BaseMiddleware(object):
-     def process_request(self, request, spider) -> None | Request | Response:
-         # Pre-process the request
-         pass
-
-     def process_response(self, request, response, spider) -> Request | Response:
-         # Pre-process the response
-         pass
-
-     def process_exception(self, request, exp, spider) -> None | Request | Response:
-         # Pre-process the exception
-         pass
-
-     @classmethod
-     def create_instance(cls, crawler):
-         return cls()
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from crawlo import Request, Response
+
+
+ class BaseMiddleware(object):
+     def process_request(self, request, spider) -> None | Request | Response:
+         # Pre-process the request
+         pass
+
+     def process_response(self, request, response, spider) -> Request | Response:
+         # Pre-process the response
+         pass
+
+     def process_exception(self, request, exp, spider) -> None | Request | Response:
+         # Pre-process the exception
+         pass
+
+     @classmethod
+     def create_instance(cls, crawler):
+         return cls()
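`BaseMiddleware` is the no-op template the middleware chain drives: each hook may return `None` to pass control along or, per the annotations, a `Request` or `Response` to short-circuit, and `create_instance(crawler)` is the factory the framework calls. A hedged sketch of a subclass; the `API_TOKEN` setting and the header value are illustrative, not part of crawlo:

```python
from crawlo import Request, Response
from crawlo.middleware import BaseMiddleware


class TokenHeaderMiddleware(BaseMiddleware):
    """Attach a bearer token to outgoing requests (illustrative)."""

    def __init__(self, token):
        self.token = token

    @classmethod
    def create_instance(cls, crawler):
        # 'API_TOKEN' is a hypothetical setting used only for this sketch
        return cls(token=crawler.settings.get('API_TOKEN'))

    def process_request(self, request, spider) -> None | Request | Response:
        if self.token:
            request.headers.setdefault('Authorization', f'Bearer {self.token}')
        return None  # fall through to the next middleware
```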
crawlo/middleware/default_header.py CHANGED
@@ -1,32 +1,32 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from crawlo.event import spider_opened
-
-
- class DefaultHeaderMiddleware(object):
-
-     def __init__(self, user_agent, headers, spider):
-         self.user_agent = user_agent
-         self.headers = headers
-         self.spider = spider
-
-     @classmethod
-     def create_instance(cls, crawler):
-         o = cls(
-             user_agent=crawler.settings.get('USER_AGENT'),
-             headers=crawler.settings.get_dict('DEFAULT_HEADERS'),
-             spider=crawler.spider
-         )
-         crawler.subscriber.subscribe(o.spider_opened, event=spider_opened)
-         return o
-
-     async def spider_opened(self):
-         self.user_agent = getattr(self.spider, 'user_agent', self.user_agent)
-         self.headers = getattr(self.spider, 'headers', self.headers)
-         if self.user_agent:
-             self.headers.setdefault('User-Agent', self.user_agent)
-
-     def process_request(self, request, _spider):
-         if self.headers:
-             for key, value in self.headers.items():
-                 request.headers.setdefault(key, value)
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from crawlo.event import spider_opened
+
+
+ class DefaultHeaderMiddleware(object):
+
+     def __init__(self, user_agent, headers, spider):
+         self.user_agent = user_agent
+         self.headers = headers
+         self.spider = spider
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(
+             user_agent=crawler.settings.get('USER_AGENT'),
+             headers=crawler.settings.get_dict('DEFAULT_HEADERS'),
+             spider=crawler.spider
+         )
+         crawler.subscriber.subscribe(o.spider_opened, event=spider_opened)
+         return o
+
+     async def spider_opened(self):
+         self.user_agent = getattr(self.spider, 'user_agent', self.user_agent)
+         self.headers = getattr(self.spider, 'headers', self.headers)
+         if self.user_agent:
+             self.headers.setdefault('User-Agent', self.user_agent)
+
+     def process_request(self, request, _spider):
+         if self.headers:
+             for key, value in self.headers.items():
+                 request.headers.setdefault(key, value)
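`DefaultHeaderMiddleware` reads `USER_AGENT` and `DEFAULT_HEADERS` from settings at construction, lets a spider override them via `user_agent`/`headers` attributes once `spider_opened` fires, and then merges the result into each request with `setdefault`, so headers already set on the request win. A settings sketch with illustrative values:

```python
# settings.py (values are illustrative)
USER_AGENT = 'Mozilla/5.0 (compatible; MyCrawler/1.0)'
DEFAULT_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml',
    'Accept-Language': 'en-US,en;q=0.9',
}

# Optional per-spider override, picked up in spider_opened():
# class NewsSpider(Spider):
#     user_agent = 'MyCrawler/2.0'
#     headers = {'Accept-Language': 'zh-CN'}
```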
crawlo/middleware/download_delay.py CHANGED
@@ -1,28 +1,28 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from asyncio import sleep
- from random import uniform
- from crawlo.utils.log import get_logger
- from crawlo.exceptions import NotConfiguredError
-
-
- class DownloadDelayMiddleware(object):
-
-     def __init__(self, settings, log_level):
-         self.delay = settings.get_float("DOWNLOAD_DELAY")
-         if not self.delay:
-             raise NotConfiguredError
-         self.randomness = settings.get_bool("RANDOMNESS")
-         self.floor, self.upper = settings.get_list("RANDOM_RANGE")
-         self.logger = get_logger(self.__class__.__name__, log_level)
-
-     @classmethod
-     def create_instance(cls, crawler):
-         o = cls(settings=crawler.settings, log_level=crawler.settings.get('LOG_LEVEL'))
-         return o
-
-     async def process_request(self, _request, _spider):
-         if self.randomness:
-             await sleep(uniform(self.delay * self.floor, self.delay * self.upper))
-         else:
-             await sleep(self.delay)
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from asyncio import sleep
+ from random import uniform
+ from crawlo.utils.log import get_logger
+ from crawlo.exceptions import NotConfiguredError
+
+
+ class DownloadDelayMiddleware(object):
+
+     def __init__(self, settings, log_level):
+         self.delay = settings.get_float("DOWNLOAD_DELAY")
+         if not self.delay:
+             raise NotConfiguredError
+         self.randomness = settings.get_bool("RANDOMNESS")
+         self.floor, self.upper = settings.get_list("RANDOM_RANGE")
+         self.logger = get_logger(self.__class__.__name__, log_level)
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls(settings=crawler.settings, log_level=crawler.settings.get('LOG_LEVEL'))
+         return o
+
+     async def process_request(self, _request, _spider):
+         if self.randomness:
+             await sleep(uniform(self.delay * self.floor, self.delay * self.upper))
+         else:
+             await sleep(self.delay)
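`DownloadDelayMiddleware` disables itself (raises `NotConfiguredError`) when `DOWNLOAD_DELAY` is falsy; otherwise it sleeps before each request, either for the fixed delay or, when `RANDOMNESS` is set, for a uniform draw from `[delay * floor, delay * upper]` with the multipliers taken from `RANDOM_RANGE`. A settings sketch with illustrative values:

```python
# settings.py (values are illustrative)
DOWNLOAD_DELAY = 2.0       # base delay in seconds; 0 or unset disables the middleware
RANDOMNESS = True          # jitter the delay
RANDOM_RANGE = [0.5, 1.5]  # (floor, upper) multipliers applied to DOWNLOAD_DELAY

# Effective per-request sleep: uniform(2.0 * 0.5, 2.0 * 1.5) -> 1.0 to 3.0 seconds
```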