cobweb-launcher 1.1.12.tar.gz → 1.1.13.tar.gz

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release.

This version of cobweb-launcher might be problematic.

Files changed (39)
  1. {cobweb-launcher-1.1.12/cobweb_launcher.egg-info → cobweb-launcher-1.1.13}/PKG-INFO +1 -1
  2. cobweb-launcher-1.1.13/cobweb/crawlers/__init__.py +2 -0
  3. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/crawlers/base_crawler.py +2 -2
  4. cobweb-launcher-1.1.13/cobweb/crawlers/file_crawler.py +98 -0
  5. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13/cobweb_launcher.egg-info}/PKG-INFO +1 -1
  6. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/setup.py +1 -1
  7. cobweb-launcher-1.1.12/cobweb/crawlers/__init__.py +0 -2
  8. cobweb-launcher-1.1.12/cobweb/crawlers/file_crawler.py +0 -173
  9. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/LICENSE +0 -0
  10. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/README.md +0 -0
  11. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/__init__.py +0 -0
  12. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/base/__init__.py +0 -0
  13. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/base/common_queue.py +0 -0
  14. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/base/decorators.py +0 -0
  15. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/base/item.py +0 -0
  16. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/base/log.py +0 -0
  17. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/base/request.py +0 -0
  18. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/base/response.py +0 -0
  19. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/base/seed.py +0 -0
  20. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/constant.py +0 -0
  21. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/db/__init__.py +0 -0
  22. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/db/redis_db.py +0 -0
  23. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/exceptions/__init__.py +0 -0
  24. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/exceptions/oss_db_exception.py +0 -0
  25. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/launchers/__init__.py +0 -0
  26. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/launchers/launcher.py +0 -0
  27. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/launchers/launcher_pro.py +0 -0
  28. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/pipelines/__init__.py +0 -0
  29. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/pipelines/base_pipeline.py +0 -0
  30. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/pipelines/loghub_pipeline.py +0 -0
  31. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/setting.py +0 -0
  32. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/utils/__init__.py +0 -0
  33. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/utils/oss.py +0 -0
  34. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/utils/tools.py +0 -0
  35. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb_launcher.egg-info/SOURCES.txt +0 -0
  36. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb_launcher.egg-info/dependency_links.txt +0 -0
  37. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb_launcher.egg-info/requires.txt +0 -0
  38. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb_launcher.egg-info/top_level.txt +0 -0
  39. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/setup.cfg +0 -0
{cobweb-launcher-1.1.12/cobweb_launcher.egg-info → cobweb-launcher-1.1.13}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: cobweb-launcher
- Version: 1.1.12
+ Version: 1.1.13
  Summary: spider_hole
  Home-page: https://github.com/Juannie-PP/cobweb
  Author: Juannie-PP
cobweb-launcher-1.1.13/cobweb/crawlers/__init__.py (new file)
@@ -0,0 +1,2 @@
+ from .base_crawler import Crawler
+ from .file_crawler import FileCrawlerAir
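With 1.1.13 the crawlers package exports only `Crawler` and `FileCrawlerAir`; `FileCrawlerPro` is dropped (see the removed `__init__.py` further down). A minimal sketch of a downstream import against the new surface — `MyFileCrawler` is a hypothetical consumer, not part of the package:

```python
# Hypothetical consumer: only these two names survive in 1.1.13.
from cobweb.crawlers import Crawler, FileCrawlerAir  # importing FileCrawlerPro would now raise ImportError


class MyFileCrawler(FileCrawlerAir):
    """Site-specific crawler built on top of the stock file crawler."""
```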
{cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/cobweb/crawlers/base_crawler.py
@@ -77,7 +77,7 @@ class Crawler(threading.Thread):
  detail=seed_detail_log_info, retry=item.seed.params.retry,
  priority=item.seed.params.priority,
  seed_version=item.seed.params.seed_version,
- identifier=item.seed.params.identifier,
+ identifier=item.seed.identifier or "",
  status=it.response, response=response_detail_log_info
  ))
  parse_iterators = self.parse(it)
@@ -108,7 +108,7 @@ class Crawler(threading.Thread):
  logger.info(LogTemplate.download_exception.format(
  detail=seed_detail_log_info, retry=seed.params.retry,
  priority=seed.params.priority, seed_version=seed.params.seed_version,
- identifier=seed.params.identifier, exception=e
+ identifier=seed.identifier or "", exception=e
  ))
  seed.params.retry += 1
  self.launcher_queue['todo'].push(seed)
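Both hunks make the same fix: the log templates now read the identifier from the seed itself, with an empty-string fallback, instead of reaching through `seed.params`. A toy illustration of why the fallback matters for log formatting — the `Seed`/`Params` classes here are stand-ins, not cobweb's real types:

```python
# Stand-in classes showing why `seed.identifier or ""` is the safer read.
class Params:
    identifier = None          # the 1.1.12 code looked the identifier up here


class Seed:
    params = Params()
    identifier = None          # 1.1.13 reads it from the seed directly


seed = Seed()
print(repr(seed.identifier or ""))    # '' — always a string, safe to format into logs
print(repr(seed.params.identifier))   # None — the old read could leak "None" into log lines
```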
cobweb-launcher-1.1.13/cobweb/crawlers/file_crawler.py (new file)
@@ -0,0 +1,98 @@
+ import os
+ from typing import Union
+ from cobweb import setting
+ from cobweb.utils import OssUtil
+ from cobweb.crawlers import Crawler
+ from cobweb.base import Seed, BaseItem, Request, Response
+ from cobweb.exceptions import OssDBPutPartError, OssDBMergeError
+
+
+ oss_util = OssUtil(is_path_style=bool(int(os.getenv("PRIVATE_LINK", 0))))
+
+
+ class FileCrawlerAir(Crawler):
+
+     @staticmethod
+     def download(item: Request) -> Union[Seed, BaseItem, Response, str]:
+         seed_dict = item.seed.to_dict
+         seed_dict["bucket_name"] = oss_util.bucket
+         try:
+             seed_dict["oss_path"] = key = item.seed.oss_path or getattr(item, "oss_path")
+
+             if oss_util.exists(key):
+                 seed_dict["data_size"] = oss_util.head(key).content_length
+                 yield Response(item.seed, "exists", **seed_dict)
+
+             else:
+                 seed_dict.setdefault("end", "")
+                 seed_dict.setdefault("start", 0)
+
+                 if seed_dict["end"] or seed_dict["start"]:
+                     start, end = seed_dict["start"], seed_dict["end"]
+                     item.request_setting["headers"]['Range'] = f'bytes={start}-{end}'
+
+                 if not item.seed.identifier:
+                     content = b""
+                     chunk_size = oss_util.chunk_size
+                     min_upload_size = oss_util.min_upload_size
+                     seed_dict.setdefault("position", 1)
+
+                     response = item.download()
+
+                     content_type = response.headers.get("content-type", "").split(";")[0]
+                     seed_dict["data_size"] = content_length = int(response.headers.get("content-length", 0))
+
+                     if content_type and content_type in setting.FILE_FILTER_CONTENT_TYPE:
+                         """Filter out responses by content type."""
+                         response.close()
+                         seed_dict["filter"] = True
+                         seed_dict["msg"] = f"response content type is {content_type}"
+                         yield Response(item.seed, response, **seed_dict)
+
+                     elif seed_dict['position'] == 1 and min_upload_size >= content_length > 0:
+                         """Flag files that are too small and return."""
+                         response.close()
+                         seed_dict["filter"] = True
+                         seed_dict["msg"] = "file size is too small"
+                         yield Response(item.seed, response, **seed_dict)
+
+                     elif seed_dict['position'] == 1 and chunk_size > content_length > min_upload_size:
+                         """Download small files directly."""
+                         for part_data in response.iter_content(chunk_size):
+                             content += part_data
+                         response.close()
+                         oss_util.put(key, content)
+                         yield Response(item.seed, response, **seed_dict)
+
+                     else:
+                         """Multipart download of medium and large files, uploaded synchronously."""
+                         seed_dict.setdefault("upload_id", oss_util.init_part(key).upload_id)
+
+                         for part_data in response.iter_content(chunk_size):
+                             content += part_data
+                             if len(content) >= chunk_size:
+                                 upload_data = content[:chunk_size]
+                                 content = content[chunk_size:]
+                                 oss_util.put_part(key, seed_dict["upload_id"], seed_dict['position'], content)
+                                 seed_dict['start'] += len(upload_data)
+                                 seed_dict['position'] += 1
+
+                         response.close()
+
+                         if content:
+                             oss_util.put_part(key, seed_dict["upload_id"], seed_dict['position'], content)
+                         oss_util.merge(key, seed_dict["upload_id"])
+                         seed_dict["data_size"] = oss_util.head(key).content_length
+                         yield Response(item.seed, response, **seed_dict)
+
+                 elif item.seed.identifier == "merge":
+                     oss_util.merge(key, seed_dict["upload_id"])
+                     seed_dict["data_size"] = oss_util.head(key).content_length
+                     yield Response(item.seed, "merge", **seed_dict)
+
+         except OssDBPutPartError:
+             yield Seed(seed_dict)
+         except OssDBMergeError:
+             yield Seed(seed_dict, identifier="merge")
+
+
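The rewritten crawler threads all resumable state (`start`, `position`, `upload_id`) through `seed_dict`, and `OssUtil` is now constructed with `is_path_style` driven by the `PRIVATE_LINK` environment variable. One thing to note when reviewing: inside the multipart loop the 1.1.13 code hands the leftover `content` buffer to `put_part`, where 1.1.12 uploaded the sliced `upload_data`. The following self-contained sketch reruns just the buffering arithmetic with a fake byte stream instead of HTTP and OSS, uploading the slice as 1.1.12 did:

```python
# Toy rerun of the multipart buffering loop: no HTTP or OSS, just bytes.
def iter_content(data: bytes, chunk_size: int):
    for i in range(0, len(data), chunk_size):
        yield data[i:i + chunk_size]


chunk_size = 4
content, parts = b"", []
state = {"start": 0, "position": 1}            # the resumable fields kept on the seed

for part_data in iter_content(b"0123456789", chunk_size):
    content += part_data
    if len(content) >= chunk_size:
        upload_data, content = content[:chunk_size], content[chunk_size:]
        parts.append((state["position"], upload_data))   # stands in for oss_util.put_part(...)
        state["start"] += len(upload_data)
        state["position"] += 1

if content:                                    # trailing short part, then merge
    parts.append((state["position"], content))

print(parts)   # [(1, b'0123'), (2, b'4567'), (3, b'89')]
print(state)   # {'start': 8, 'position': 3}
```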
{cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13/cobweb_launcher.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: cobweb-launcher
- Version: 1.1.12
+ Version: 1.1.13
  Summary: spider_hole
  Home-page: https://github.com/Juannie-PP/cobweb
  Author: Juannie-PP
{cobweb-launcher-1.1.12 → cobweb-launcher-1.1.13}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:

  setup(
      name="cobweb-launcher",
-     version="1.1.12",
+     version="1.1.13",
      packages=find_packages(),
      url="https://github.com/Juannie-PP/cobweb",
      license="MIT",
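Only the version string changes in the packaging metadata. A quick way to confirm which release is actually installed after upgrading (standard library, Python 3.8+):

```python
# Verify the installed release; prints "1.1.13" if the bump took effect.
from importlib.metadata import version

print(version("cobweb-launcher"))
```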
cobweb-launcher-1.1.12/cobweb/crawlers/__init__.py (removed)
@@ -1,2 +0,0 @@
- from .base_crawler import Crawler
- from .file_crawler import FileCrawlerAir, FileCrawlerPro
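`FileCrawlerPro` disappears from the public API with no deprecation path. Judging by the removed module below, its `download` differed from `FileCrawlerAir`'s only in where `response.close()` was called, so a consumer pinned to the old name could bridge with an alias — an unverified workaround, not an upstream recommendation:

```python
# Hypothetical compatibility alias for code that still imports FileCrawlerPro.
from cobweb.crawlers import FileCrawlerAir

FileCrawlerPro = FileCrawlerAir  # 1.1.12 behavior differences were minor (response.close() timing)
```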
cobweb-launcher-1.1.12/cobweb/crawlers/file_crawler.py (removed)
@@ -1,173 +0,0 @@
-
- from typing import Union
- from cobweb import setting
- from cobweb.utils import OssUtil
- from cobweb.crawlers import Crawler
- from cobweb.base import Seed, BaseItem, Request, Response
- from cobweb.exceptions import OssDBPutPartError, OssDBMergeError
-
-
- oss_util = OssUtil()
-
-
- class FileCrawlerAir(Crawler):
-
-     @staticmethod
-     def download(item: Request) -> Union[Seed, BaseItem, Response, str]:
-         seed_dict = item.seed.to_dict
-         bucket_name = oss_util.bucket
-         try:
-             key = item.seed.oss_path or getattr(item, "oss_path", None)
-             if oss_util.exists(key):
-                 content_length = oss_util.head(key).content_length
-                 yield Response(item.seed, "exists", bucket_name=bucket_name, data_size=content_length, **seed_dict)
-
-             end = seed_dict.get("end", "")
-             start = seed_dict.get("start", "0")
-
-             if end or int(start):
-                 item.request_setting["headers"]['Range'] = f'bytes={start}-{end}'
-
-             if not item.seed.params.identifier:
-                 content = b""
-                 chunk_size = oss_util.chunk_size
-                 min_upload_size = oss_util.min_upload_size
-                 position = seed_dict.get("position", 1)
-
-                 response = item.download()
-
-                 content_length = int(response.headers.get("content-length", 0))
-                 content_type = response.headers.get("content-type", "").split(";")[0]
-                 if content_type and content_type in setting.FILE_FILTER_CONTENT_TYPE:
-                     yield Response(
-                         item.seed, response, filter=True, msg=f"response content type is {content_type}",
-                         bucket_name=bucket_name, data_size=content_length, **seed_dict
-                     )
-                 elif position == 1 and min_upload_size >= content_length > 0:
-                     """Flag files that are too small and return."""
-                     yield Response(
-                         item.seed, response, filter=True, msg="file size is too small",
-                         bucket_name=bucket_name, data_size=content_length, **seed_dict
-                     )
-                 elif position == 1 and chunk_size > content_length > min_upload_size:
-                     """Download small files directly."""
-                     for part_data in response.iter_content(chunk_size):
-                         content += part_data
-                     oss_util.put(key, content)
-                     yield Response(item.seed, response, bucket_name=bucket_name, data_size=content_length, **seed_dict)
-                     response.close()
-                 else:
-                     """Multipart download of medium and large files, uploaded synchronously."""
-                     upload_content_length = 0
-                     if not seed_dict.get("upload_id"):
-                         seed_dict["upload_id"] = oss_util.init_part(key).upload_id
-                     upload_id = seed_dict["upload_id"]
-                     for part_data in response.iter_content(chunk_size):
-                         content += part_data
-                         if len(content) >= chunk_size:
-                             upload_data = content[:chunk_size]
-                             content = content[chunk_size:]
-                             oss_util.put_part(key, upload_id, position, upload_data)
-                             upload_content_length += len(upload_data)
-                             position += 1
-                             seed_dict['position'] = position
-                             seed_dict['start'] = upload_content_length
-
-                     response.close()
-                     if content:
-                         oss_util.put_part(key, upload_id, position, content)
-                         content_length += len(content)
-                     oss_util.merge(key, upload_id)
-                     yield Response(item.seed, response, bucket_name=bucket_name, data_size=content_length, **seed_dict)
-
-             elif item.seed.params.identifier == "merge":
-                 oss_util.merge(key, seed_dict["upload_id"])
-                 content_length = oss_util.head(key).content_length
-                 yield Response(item.seed, "merge", bucket_name=bucket_name, data_size=content_length, **seed_dict)
-         except OssDBPutPartError:
-             yield Seed(seed_dict)
-         except OssDBMergeError:
-             yield Seed(seed_dict, identifier="merge")
-
-
- class FileCrawlerPro(FileCrawlerAir):
-
-     @staticmethod
-     def download(item: Request) -> Union[Seed, BaseItem, Response, str]:
-         seed_dict = item.seed.to_dict
-         bucket_name = oss_util.bucket
-         try:
-             key = item.seed.oss_path or getattr(item, "oss_path", None)
-             if oss_util.exists(key):
-                 content_length = oss_util.head(key).content_length
-                 yield Response(item.seed, "exists", bucket_name=bucket_name, data_size=content_length, **seed_dict)
-
-             end = seed_dict.get("end", "")
-             start = seed_dict.get("start", "0")
-
-             if end or int(start):
-                 item.request_setting["headers"]['Range'] = f'bytes={start}-{end}'
-
-             if not item.seed.params.identifier:
-                 content = b""
-                 chunk_size = oss_util.chunk_size
-                 min_upload_size = oss_util.min_upload_size
-                 position = seed_dict.get("position", 1)
-
-                 response = item.download()
-
-                 content_length = int(response.headers.get("content-length", 0))
-                 content_type = response.headers.get("content-type", "").split(";")[0]
-                 if content_type and content_type in setting.FILE_FILTER_CONTENT_TYPE:
-                     yield Response(
-                         item.seed, response, filter=True, msg=f"response content type is {content_type}",
-                         bucket_name=bucket_name, data_size=content_length, **seed_dict
-                     )
-                     response.close()
-                 elif position == 1 and min_upload_size >= content_length > 0:
-                     """Flag files that are too small and return."""
-                     yield Response(
-                         item.seed, response, filter=True, msg="file size is too small",
-                         bucket_name=bucket_name, data_size=content_length, **seed_dict
-                     )
-                     response.close()
-                 elif position == 1 and chunk_size > content_length > min_upload_size:
-                     """Download small files directly."""
-                     for part_data in response.iter_content(chunk_size):
-                         content += part_data
-                     oss_util.put(key, content)
-                     yield Response(item.seed, response, bucket_name=bucket_name, data_size=content_length, **seed_dict)
-                     response.close()
-                 else:
-                     """Multipart download of medium and large files, uploaded synchronously."""
-                     upload_content_length = 0
-                     if not seed_dict.get("upload_id"):
-                         seed_dict["upload_id"] = oss_util.init_part(key).upload_id
-                     upload_id = seed_dict["upload_id"]
-                     for part_data in response.iter_content(chunk_size):
-                         content += part_data
-                         if len(content) >= chunk_size:
-                             upload_data = content[:chunk_size]
-                             content = content[chunk_size:]
-                             oss_util.put_part(key, upload_id, position, upload_data)
-                             upload_content_length += len(upload_data)
-                             position += 1
-                             seed_dict['position'] = position
-                             seed_dict['start'] = upload_content_length
-
-                     if content:
-                         oss_util.put_part(key, upload_id, position, content)
-                         content_length += len(content)
-                     oss_util.merge(key, upload_id)
-                     yield Response(item.seed, response, bucket_name=bucket_name, data_size=content_length, **seed_dict)
-                     response.close()
-
-             elif item.seed.params.identifier == "merge":
-                 oss_util.merge(key, seed_dict["upload_id"])
-                 content_length = oss_util.head(key).content_length
-                 yield Response(item.seed, "merge", bucket_name=bucket_name, data_size=content_length, **seed_dict)
-
-         except OssDBPutPartError:
-             yield Seed(seed_dict)
-         except OssDBMergeError:
-             yield Seed(seed_dict, identifier="merge")