cobweb-launcher 1.1.12__tar.gz → 1.1.14__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of cobweb-launcher might be problematic.

Files changed (40)
  1. {cobweb-launcher-1.1.12/cobweb_launcher.egg-info → cobweb-launcher-1.1.14}/PKG-INFO +1 -1
  2. cobweb-launcher-1.1.14/cobweb/crawlers/__init__.py +2 -0
  3. cobweb-launcher-1.1.14/cobweb/crawlers/base_crawler.py +128 -0
  4. cobweb-launcher-1.1.14/cobweb/crawlers/file_crawler.py +98 -0
  5. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/launchers/launcher.py +1 -1
  6. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/launchers/launcher_pro.py +1 -1
  7. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14/cobweb_launcher.egg-info}/PKG-INFO +1 -1
  8. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/setup.py +1 -1
  9. cobweb-launcher-1.1.12/cobweb/crawlers/__init__.py +0 -2
  10. cobweb-launcher-1.1.12/cobweb/crawlers/base_crawler.py +0 -122
  11. cobweb-launcher-1.1.12/cobweb/crawlers/file_crawler.py +0 -173
  12. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/LICENSE +0 -0
  13. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/README.md +0 -0
  14. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/__init__.py +0 -0
  15. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/base/__init__.py +0 -0
  16. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/base/common_queue.py +0 -0
  17. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/base/decorators.py +0 -0
  18. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/base/item.py +0 -0
  19. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/base/log.py +0 -0
  20. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/base/request.py +0 -0
  21. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/base/response.py +0 -0
  22. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/base/seed.py +0 -0
  23. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/constant.py +0 -0
  24. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/db/__init__.py +0 -0
  25. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/db/redis_db.py +0 -0
  26. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/exceptions/__init__.py +0 -0
  27. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/exceptions/oss_db_exception.py +0 -0
  28. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/launchers/__init__.py +0 -0
  29. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/pipelines/__init__.py +0 -0
  30. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/pipelines/base_pipeline.py +0 -0
  31. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/pipelines/loghub_pipeline.py +0 -0
  32. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/setting.py +0 -0
  33. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/utils/__init__.py +0 -0
  34. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/utils/oss.py +0 -0
  35. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/utils/tools.py +0 -0
  36. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb_launcher.egg-info/SOURCES.txt +0 -0
  37. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb_launcher.egg-info/dependency_links.txt +0 -0
  38. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb_launcher.egg-info/requires.txt +0 -0
  39. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb_launcher.egg-info/top_level.txt +0 -0
  40. {cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/setup.cfg +0 -0
{cobweb-launcher-1.1.12/cobweb_launcher.egg-info → cobweb-launcher-1.1.14}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: cobweb-launcher
-Version: 1.1.12
+Version: 1.1.14
 Summary: spider_hole
 Home-page: https://github.com/Juannie-PP/cobweb
 Author: Juannie-PP

cobweb-launcher-1.1.14/cobweb/crawlers/__init__.py
@@ -0,0 +1,2 @@
+from .base_crawler import Crawler
+from .file_crawler import FileCrawlerAir

cobweb-launcher-1.1.14/cobweb/crawlers/base_crawler.py
@@ -0,0 +1,128 @@
+import threading
+
+from inspect import isgenerator
+from typing import Union, Callable, Mapping
+
+from cobweb.base import Queue, Seed, BaseItem, Request, Response, logger
+from cobweb.constant import DealModel, LogTemplate
+from cobweb.utils import download_log_info
+from cobweb import setting
+
+
+class Crawler(threading.Thread):
+
+    def __init__(
+            self,
+            upload_queue: Queue,
+            custom_func: Union[Mapping[str, Callable]],
+            launcher_queue: Union[Mapping[str, Queue]],
+    ):
+        super().__init__()
+
+        self.upload_queue = upload_queue
+        for func_name, _callable in custom_func.items():
+            if isinstance(_callable, Callable):
+                self.__setattr__(func_name, _callable)
+
+        self.launcher_queue = launcher_queue
+
+        self.spider_thread_num = setting.SPIDER_THREAD_NUM
+        self.max_retries = setting.SPIDER_MAX_RETRIES
+
+    @staticmethod
+    def request(seed: Seed) -> Union[Request, BaseItem]:
+        stream = True if setting.DOWNLOAD_MODEL else False
+        yield Request(seed.url, seed, stream=stream, timeout=5)
+
+    @staticmethod
+    def download(item: Request) -> Union[Seed, BaseItem, Response, str]:
+        response = item.download()
+        yield Response(item.seed, response, **item.to_dict)
+
+    @staticmethod
+    def parse(item: Response) -> BaseItem:
+        pass
+
+    def get_seed(self) -> Seed:
+        return self.launcher_queue['todo'].pop()
+
+    def distribute(self, item, seed):
+        if isinstance(item, BaseItem):
+            self.upload_queue.push(item)
+        elif isinstance(item, Seed):
+            self.launcher_queue['new'].push(item)
+        elif isinstance(item, str) and item == DealModel.poll:
+            self.launcher_queue['todo'].push(seed)
+        elif isinstance(item, str) and item == DealModel.done:
+            self.launcher_queue['done'].push(seed)
+        elif isinstance(item, str) and item == DealModel.fail:
+            seed.identifier = DealModel.fail
+            self.launcher_queue['done'].push(seed)
+        else:
+            raise TypeError("yield value type error!")
+
+    def spider(self):
+        while True:
+            seed = self.get_seed()
+
+            if not seed:
+                continue
+
+            elif seed.params.retry >= self.max_retries:
+                seed.params.identifier = DealModel.fail
+                self.launcher_queue['done'].push(seed)
+                continue
+
+            seed_detail_log_info = download_log_info(seed.to_dict)
+
+            request_iterators = self.request(seed)
+
+            if not isgenerator(request_iterators):
+                raise TypeError("request function isn't a generator!")
+
+            for request_item in request_iterators:
+
+                if isinstance(request_item, BaseItem):
+                    self.upload_queue.push(request_item)
+
+                elif isinstance(request_item, Request):
+                    try:
+                        download_iterators = self.download(request_item)
+                        if not isgenerator(download_iterators):
+                            raise TypeError("download function isn't a generator")
+                        for download_item in download_iterators:
+                            if isinstance(download_item, Response):
+                                response_detail_log_info = download_log_info(download_item.to_dict)
+                                logger.info(LogTemplate.download_info.format(
+                                    detail=seed_detail_log_info,
+                                    retry=seed.params.retry,
+                                    priority=seed.params.priority,
+                                    seed_version=seed.params.seed_version,
+                                    identifier=seed.identifier or "",
+                                    status=download_item.response,
+                                    response=response_detail_log_info
+                                ))
+                                parse_iterators = self.parse(download_item)
+                                if not isgenerator(parse_iterators):
+                                    raise TypeError("parse function isn't a generator")
+                                for parse_item in parse_iterators:
+                                    if isinstance(parse_item, Response):
+                                        raise TypeError("upload_item can't be a Response instance")
+                                    self.distribute(parse_item, seed)
+                            else:
+                                self.distribute(download_item, seed)
+                    except Exception as e:
+                        logger.info(LogTemplate.download_exception.format(
+                            detail=seed_detail_log_info,
+                            retry=seed.params.retry,
+                            priority=seed.params.priority,
+                            seed_version=seed.params.seed_version,
+                            identifier=seed.identifier or "", exception=e
+                        ))
+                        seed.params.retry += 1
+                        self.launcher_queue['todo'].push(seed)
+
+    def run(self):
+        for index in range(self.spider_thread_num):
+            threading.Thread(name=f"spider_{index}", target=self.spider).start()
+

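The new Crawler turns all three hooks into generators: request yields Request objects, download yields Response objects (or Seed / control strings), and parse yields items that distribute() routes (BaseItem to upload_queue, Seed to the 'new' queue, DealModel strings back to the 'todo'/'done' queues). A minimal subclass sketch of that contract, using only names visible in this diff; DemoCrawler itself is hypothetical, not shipped code:

    from cobweb.base import Seed, Request, Response
    from cobweb.constant import DealModel
    from cobweb.crawlers import Crawler


    class DemoCrawler(Crawler):
        # illustrative subclass; not part of the cobweb-launcher release

        @staticmethod
        def request(seed: Seed):
            # must be a generator: spider() raises TypeError on a plain return
            yield Request(seed.url, seed, timeout=5)

        @staticmethod
        def parse(item: Response):
            # yield BaseItem instances to upload, Seed objects to enqueue,
            # or a DealModel control string; here the seed is simply marked done
            yield DealModel.done
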
cobweb-launcher-1.1.14/cobweb/crawlers/file_crawler.py
@@ -0,0 +1,98 @@
+import os
+from typing import Union
+from cobweb import setting
+from cobweb.utils import OssUtil
+from cobweb.crawlers import Crawler
+from cobweb.base import Seed, BaseItem, Request, Response
+from cobweb.exceptions import OssDBPutPartError, OssDBMergeError
+
+
+oss_util = OssUtil(is_path_style=bool(int(os.getenv("PRIVATE_LINK", 0))))
+
+
+class FileCrawlerAir(Crawler):
+
+    @staticmethod
+    def download(item: Request) -> Union[Seed, BaseItem, Response, str]:
+        seed_dict = item.seed.to_dict
+        seed_dict["bucket_name"] = oss_util.bucket
+        try:
+            seed_dict["oss_path"] = key = item.seed.oss_path or getattr(item, "oss_path")
+
+            if oss_util.exists(key):
+                seed_dict["data_size"] = oss_util.head(key).content_length
+                yield Response(item.seed, "exists", **seed_dict)
+
+            else:
+                seed_dict.setdefault("end", "")
+                seed_dict.setdefault("start", 0)
+
+                if seed_dict["end"] or seed_dict["start"]:
+                    start, end = seed_dict["start"], seed_dict["end"]
+                    item.request_setting["headers"]['Range'] = f'bytes={start}-{end}'
+
+                if not item.seed.identifier:
+                    content = b""
+                    chunk_size = oss_util.chunk_size
+                    min_upload_size = oss_util.min_upload_size
+                    seed_dict.setdefault("position", 1)
+
+                    response = item.download()
+
+                    content_type = response.headers.get("content-type", "").split(";")[0]
+                    seed_dict["data_size"] = content_length = int(response.headers.get("content-length", 0))
+
+                    if content_type and content_type in setting.FILE_FILTER_CONTENT_TYPE:
+                        """Filter out responses by content type"""
+                        response.close()
+                        seed_dict["filter"] = True
+                        seed_dict["msg"] = f"response content type is {content_type}"
+                        yield Response(item.seed, response, **seed_dict)
+
+                    elif seed_dict['position'] == 1 and min_upload_size >= content_length > 0:
+                        """Flag files that are too small"""
+                        response.close()
+                        seed_dict["filter"] = True
+                        seed_dict["msg"] = "file size is too small"
+                        yield Response(item.seed, response, **seed_dict)
+
+                    elif seed_dict['position'] == 1 and chunk_size > content_length > min_upload_size:
+                        """Download small files directly"""
+                        for part_data in response.iter_content(chunk_size):
+                            content += part_data
+                        response.close()
+                        oss_util.put(key, content)
+                        yield Response(item.seed, response, **seed_dict)
+
+                    else:
+                        """Synchronous multipart download for medium and large files"""
+                        seed_dict.setdefault("upload_id", oss_util.init_part(key).upload_id)
+
+                        for part_data in response.iter_content(chunk_size):
+                            content += part_data
+                            if len(content) >= chunk_size:
+                                upload_data = content[:chunk_size]
+                                content = content[chunk_size:]
+                                oss_util.put_part(key, seed_dict["upload_id"], seed_dict['position'], content)
+                                seed_dict['start'] += len(upload_data)
+                                seed_dict['position'] += 1
+
+                        response.close()
+
+                        if content:
+                            oss_util.put_part(key, seed_dict["upload_id"], seed_dict['position'], content)
+                        oss_util.merge(key, seed_dict["upload_id"])
+                        seed_dict["data_size"] = oss_util.head(key).content_length
+                        yield Response(item.seed, response, **seed_dict)
+
+                elif item.seed.identifier == "merge":
+                    oss_util.merge(key, seed_dict["upload_id"])
+                    seed_dict["data_size"] = oss_util.head(key).content_length
+                    yield Response(item.seed, "merge", **seed_dict)
+
+        except OssDBPutPartError:
+            yield Seed(seed_dict)
+        except OssDBMergeError:
+            yield Seed(seed_dict, identifier="merge")
+
+

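The medium/large branch above is a resumable multipart upload: buffer the streamed response, flush a fixed-size part whenever the buffer fills, persist position/start/upload_id back onto the seed so a retried seed resumes where it stopped, then merge the parts. A self-contained sketch of just that slicing loop, with a stub put_part standing in for OssUtil (all names here are illustrative, not from the package):

    def upload_in_parts(stream, put_part, chunk_size):
        """Buffer a byte stream and flush fixed-size parts; return bytes uploaded."""
        buffer, position, uploaded = b"", 1, 0
        for part_data in stream:
            buffer += part_data
            while len(buffer) >= chunk_size:  # flush every full part
                part, buffer = buffer[:chunk_size], buffer[chunk_size:]
                put_part(position, part)
                uploaded += len(part)
                position += 1
        if buffer:  # trailing partial part; a merge call would follow
            put_part(position, buffer)
            uploaded += len(buffer)
        return uploaded

    # usage with a dict standing in for the part store:
    parts = {}
    assert upload_in_parts(iter([b"a" * 7, b"b" * 5]), parts.__setitem__, 4) == 12
    assert len(parts) == 3
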
{cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/launchers/launcher.py
@@ -93,7 +93,7 @@ class Launcher(threading.Thread):
             @launcher.request
             def request(seed: Seed) -> Union[Request, BaseItem]:
                 ...
-                return Request(seed.url, seed)
+                yield Request(seed.url, seed)
         """
         def decorator(func):
             self.__CUSTOM_FUNC__["request"] = func

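The docstring fix mirrors the behavioral change in base_crawler.py: a custom request function registered through this decorator must now be a generator, because Crawler.spider() rejects anything else with a TypeError. A hedged sketch of the registration, assuming a Launcher instance named launcher has been constructed elsewhere:

    from cobweb.base import Seed, Request

    @launcher.request
    def request(seed: Seed):
        # 1.1.14 contract: yield the Request instead of returning it
        yield Request(seed.url, seed, timeout=5)
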
{cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/cobweb/launchers/launcher_pro.py
@@ -117,7 +117,7 @@ class LauncherPro(Launcher):
                 seed = self.__LAUNCHER_QUEUE__['done'].pop()
                 if not seed:
                     break
-                if seed.params.identifier == DealModel.fail:
+                if seed.identifier == DealModel.fail:
                     f_seeds.append(seed.to_string)
                 elif self._done_model == 1:
                     s_seeds.append(seed.to_string)

{cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14/cobweb_launcher.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: cobweb-launcher
-Version: 1.1.12
+Version: 1.1.14
 Summary: spider_hole
 Home-page: https://github.com/Juannie-PP/cobweb
 Author: Juannie-PP

{cobweb-launcher-1.1.12 → cobweb-launcher-1.1.14}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
 setup(
     name="cobweb-launcher",
-    version="1.1.12",
+    version="1.1.14",
     packages=find_packages(),
     url="https://github.com/Juannie-PP/cobweb",
     license="MIT",

cobweb-launcher-1.1.12/cobweb/crawlers/__init__.py
@@ -1,2 +0,0 @@
-from .base_crawler import Crawler
-from .file_crawler import FileCrawlerAir, FileCrawlerPro

cobweb-launcher-1.1.12/cobweb/crawlers/base_crawler.py
@@ -1,122 +0,0 @@
-import threading
-
-from inspect import isgenerator
-from typing import Union, Callable, Mapping
-
-from cobweb.base import Queue, Seed, BaseItem, Request, Response, logger
-from cobweb.constant import DealModel, LogTemplate
-from cobweb.utils import download_log_info
-from cobweb import setting
-
-
-class Crawler(threading.Thread):
-
-    def __init__(
-            self,
-            upload_queue: Queue,
-            custom_func: Union[Mapping[str, Callable]],
-            launcher_queue: Union[Mapping[str, Queue]],
-    ):
-        super().__init__()
-
-        self.upload_queue = upload_queue
-        for func_name, _callable in custom_func.items():
-            if isinstance(_callable, Callable):
-                self.__setattr__(func_name, _callable)
-
-        self.launcher_queue = launcher_queue
-
-        self.spider_thread_num = setting.SPIDER_THREAD_NUM
-        self.max_retries = setting.SPIDER_MAX_RETRIES
-
-    @staticmethod
-    def request(seed: Seed) -> Union[Request, BaseItem]:
-        stream = True if setting.DOWNLOAD_MODEL else False
-        return Request(seed.url, seed, stream=stream, timeout=5)
-
-    @staticmethod
-    def download(item: Request) -> Union[Seed, BaseItem, Response, str]:
-        response = item.download()
-        yield Response(item.seed, response, **item.to_dict)
-
-    @staticmethod
-    def parse(item: Response) -> BaseItem:
-        pass
-
-    def get(self) -> Seed:
-        return self.launcher_queue['todo'].pop()
-
-    def spider(self):
-        while True:
-            seed = self.get()
-
-            if not seed:
-                continue
-
-            elif seed.params.retry >= self.max_retries:
-                seed.params.identifier = DealModel.fail
-                self.launcher_queue['done'].push(seed)
-                continue
-
-            item = self.request(seed)
-
-            if isinstance(item, Request):
-
-                download_iterators = self.download(item)
-
-                if not isgenerator(download_iterators):
-                    raise TypeError("download function isn't a generator")
-
-                seed_detail_log_info = download_log_info(seed.to_dict)
-
-                try:
-                    for it in download_iterators:
-                        if isinstance(it, Response):
-                            response_detail_log_info = download_log_info(it.to_dict)
-                            logger.info(LogTemplate.download_info.format(
-                                detail=seed_detail_log_info, retry=item.seed.params.retry,
-                                priority=item.seed.params.priority,
-                                seed_version=item.seed.params.seed_version,
-                                identifier=item.seed.params.identifier,
-                                status=it.response, response=response_detail_log_info
-                            ))
-                            parse_iterators = self.parse(it)
-                            if not isgenerator(parse_iterators):
-                                raise TypeError("parse function isn't a generator")
-                            for upload_item in parse_iterators:
-                                if not isinstance(upload_item, BaseItem):
-                                    raise TypeError("upload_item isn't BaseItem subclass")
-                                self.upload_queue.push(upload_item)
-                        elif isinstance(it, BaseItem):
-                            self.upload_queue.push(it)
-                        elif isinstance(it, Seed):
-                            self.launcher_queue['new'].push(it)
-                        elif isinstance(it, str) and it == DealModel.poll:
-                            self.launcher_queue['todo'].push(item)
-                            break
-                        elif isinstance(it, str) and it == DealModel.done:
-                            self.launcher_queue['done'].push(seed)
-                            break
-                        elif isinstance(it, str) and it == DealModel.fail:
-                            seed.params.identifier = DealModel.fail
-                            self.launcher_queue['done'].push(seed)
-                            break
-                        else:
-                            raise TypeError("yield value type error!")
-
-                except Exception as e:
-                    logger.info(LogTemplate.download_exception.format(
-                        detail=seed_detail_log_info, retry=seed.params.retry,
-                        priority=seed.params.priority, seed_version=seed.params.seed_version,
-                        identifier=seed.params.identifier, exception=e
-                    ))
-                    seed.params.retry += 1
-                    self.launcher_queue['todo'].push(seed)
-
-            elif isinstance(item, BaseItem):
-                self.upload_queue.push(item)
-
-    def run(self):
-        for index in range(self.spider_thread_num):
-            threading.Thread(name=f"spider_{index}", target=self.spider).start()
-

cobweb-launcher-1.1.12/cobweb/crawlers/file_crawler.py
@@ -1,173 +0,0 @@
-
-from typing import Union
-from cobweb import setting
-from cobweb.utils import OssUtil
-from cobweb.crawlers import Crawler
-from cobweb.base import Seed, BaseItem, Request, Response
-from cobweb.exceptions import OssDBPutPartError, OssDBMergeError
-
-
-oss_util = OssUtil()
-
-
-class FileCrawlerAir(Crawler):
-
-    @staticmethod
-    def download(item: Request) -> Union[Seed, BaseItem, Response, str]:
-        seed_dict = item.seed.to_dict
-        bucket_name = oss_util.bucket
-        try:
-            key = item.seed.oss_path or getattr(item, "oss_path", None)
-            if oss_util.exists(key):
-                content_length = oss_util.head(key).content_length
-                yield Response(item.seed, "exists", bucket_name=bucket_name, data_size=content_length, **seed_dict)
-
-            end = seed_dict.get("end", "")
-            start = seed_dict.get("start", "0")
-
-            if end or int(start):
-                item.request_setting["headers"]['Range'] = f'bytes={start}-{end}'
-
-            if not item.seed.params.identifier:
-                content = b""
-                chunk_size = oss_util.chunk_size
-                min_upload_size = oss_util.min_upload_size
-                position = seed_dict.get("position", 1)
-
-                response = item.download()
-
-                content_length = int(response.headers.get("content-length", 0))
-                content_type = response.headers.get("content-type", "").split(";")[0]
-                if content_type and content_type in setting.FILE_FILTER_CONTENT_TYPE:
-                    yield Response(
-                        item.seed, response, filter=True, msg=f"response content type is {content_type}",
-                        bucket_name=bucket_name, data_size=content_length, **seed_dict
-                    )
-                elif position == 1 and min_upload_size >= content_length > 0:
-                    """Flag files that are too small"""
-                    yield Response(
-                        item.seed, response, filter=True, msg="file size is too small",
-                        bucket_name=bucket_name, data_size=content_length, **seed_dict
-                    )
-                elif position == 1 and chunk_size > content_length > min_upload_size:
-                    """Download small files directly"""
-                    for part_data in response.iter_content(chunk_size):
-                        content += part_data
-                    oss_util.put(key, content)
-                    yield Response(item.seed, response, bucket_name=bucket_name, data_size=content_length, **seed_dict)
-                    response.close()
-                else:
-                    """Synchronous multipart download for medium and large files"""
-                    upload_content_length = 0
-                    if not seed_dict.get("upload_id"):
-                        seed_dict["upload_id"] = oss_util.init_part(key).upload_id
-                    upload_id = seed_dict["upload_id"]
-                    for part_data in response.iter_content(chunk_size):
-                        content += part_data
-                        if len(content) >= chunk_size:
-                            upload_data = content[:chunk_size]
-                            content = content[chunk_size:]
-                            oss_util.put_part(key, upload_id, position, upload_data)
-                            upload_content_length += len(upload_data)
-                            position += 1
-                            seed_dict['position'] = position
-                            seed_dict['start'] = upload_content_length
-
-                    response.close()
-                    if content:
-                        oss_util.put_part(key, upload_id, position, content)
-                        content_length += len(content)
-                    oss_util.merge(key, upload_id)
-                    yield Response(item.seed, response, bucket_name=bucket_name, data_size=content_length, **seed_dict)
-
-            elif item.seed.params.identifier == "merge":
-                oss_util.merge(key, seed_dict["upload_id"])
-                content_length = oss_util.head(key).content_length
-                yield Response(item.seed, "merge", bucket_name=bucket_name, data_size=content_length, **seed_dict)
-        except OssDBPutPartError:
-            yield Seed(seed_dict)
-        except OssDBMergeError:
-            yield Seed(seed_dict, identifier="merge")
-
-
-class FileCrawlerPro(FileCrawlerAir):
-
-    @staticmethod
-    def download(item: Request) -> Union[Seed, BaseItem, Response, str]:
-        seed_dict = item.seed.to_dict
-        bucket_name = oss_util.bucket
-        try:
-            key = item.seed.oss_path or getattr(item, "oss_path", None)
-            if oss_util.exists(key):
-                content_length = oss_util.head(key).content_length
-                yield Response(item.seed, "exists", bucket_name=bucket_name, data_size=content_length, **seed_dict)
-
-            end = seed_dict.get("end", "")
-            start = seed_dict.get("start", "0")
-
-            if end or int(start):
-                item.request_setting["headers"]['Range'] = f'bytes={start}-{end}'
-
-            if not item.seed.params.identifier:
-                content = b""
-                chunk_size = oss_util.chunk_size
-                min_upload_size = oss_util.min_upload_size
-                position = seed_dict.get("position", 1)
-
-                response = item.download()
-
-                content_length = int(response.headers.get("content-length", 0))
-                content_type = response.headers.get("content-type", "").split(";")[0]
-                if content_type and content_type in setting.FILE_FILTER_CONTENT_TYPE:
-                    yield Response(
-                        item.seed, response, filter=True, msg=f"response content type is {content_type}",
-                        bucket_name=bucket_name, data_size=content_length, **seed_dict
-                    )
-                    response.close()
-                elif position == 1 and min_upload_size >= content_length > 0:
-                    """Flag files that are too small"""
-                    yield Response(
-                        item.seed, response, filter=True, msg="file size is too small",
-                        bucket_name=bucket_name, data_size=content_length, **seed_dict
-                    )
-                    response.close()
-                elif position == 1 and chunk_size > content_length > min_upload_size:
-                    """Download small files directly"""
-                    for part_data in response.iter_content(chunk_size):
-                        content += part_data
-                    oss_util.put(key, content)
-                    yield Response(item.seed, response, bucket_name=bucket_name, data_size=content_length, **seed_dict)
-                    response.close()
-                else:
-                    """Synchronous multipart download for medium and large files"""
-                    upload_content_length = 0
-                    if not seed_dict.get("upload_id"):
-                        seed_dict["upload_id"] = oss_util.init_part(key).upload_id
-                    upload_id = seed_dict["upload_id"]
-                    for part_data in response.iter_content(chunk_size):
-                        content += part_data
-                        if len(content) >= chunk_size:
-                            upload_data = content[:chunk_size]
-                            content = content[chunk_size:]
-                            oss_util.put_part(key, upload_id, position, upload_data)
-                            upload_content_length += len(upload_data)
-                            position += 1
-                            seed_dict['position'] = position
-                            seed_dict['start'] = upload_content_length
-
-                    if content:
-                        oss_util.put_part(key, upload_id, position, content)
-                        content_length += len(content)
-                    oss_util.merge(key, upload_id)
-                    yield Response(item.seed, response, bucket_name=bucket_name, data_size=content_length, **seed_dict)
-                    response.close()
-
-            elif item.seed.params.identifier == "merge":
-                oss_util.merge(key, seed_dict["upload_id"])
-                content_length = oss_util.head(key).content_length
-                yield Response(item.seed, "merge", bucket_name=bucket_name, data_size=content_length, **seed_dict)
-
-        except OssDBPutPartError:
-            yield Seed(seed_dict)
-        except OssDBMergeError:
-            yield Seed(seed_dict, identifier="merge")