crawlo 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crawlo might be problematic.

@@ -5,16 +5,173 @@
  # @Author : crawl-coder
  # @Desc : time utilities
  """
- from datetime import datetime
+ import dateparser
+ from typing import Optional, Union
+ from datetime import datetime, timedelta
+ from dateutil.relativedelta import relativedelta

+ # Common datetime formats
+ COMMON_FORMATS = [
+     "%Y-%m-%d %H:%M:%S",
+     "%Y/%m/%d %H:%M:%S",
+     "%d-%m-%Y %H:%M:%S",
+     "%d/%m/%Y %H:%M:%S",
+     "%Y-%m-%d",
+     "%Y/%m/%d",
+     "%d-%m-%Y",
+     "%d/%m/%Y",
+     "%b %d, %Y",                  # Jan 01, 2023
+     "%B %d, %Y",                  # January 01, 2023
+     "%Y年%m月%d日",                # 2023年01月01日
+     "%Y年%m月%d日 %H时%M分%S秒",    # 2023年01月01日 12时30分45秒
+     "%a %b %d %H:%M:%S %Y",       # Wed Jan 01 12:00:00 2020
+     "%a, %d %b %Y %H:%M:%S",      # Wed, 01 Jan 2020 12:00:00
+     "%Y-%m-%dT%H:%M:%S.%f",       # ISO 8601 (with fractional seconds)
+ ]

- def now():
-     return datetime.now().strftime('%Y-%m-%d %H:%M:%S')

+ def normalize_time(time_str: str) -> Optional[datetime]:
+     """
+     Try to parse a time string against the common formats.

- def date_delta(start, end):
-     start = datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
-     end = datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
+     :param time_str: time string (e.g. "2023-01-01 12:00:00")
+     :return: datetime on success; raises on failure
+     """
+     for fmt in COMMON_FORMATS:
+         try:
+             return datetime.strptime(time_str, fmt)
+         except ValueError:
+             continue
+     raise ValueError(f"无法解析时间字符串:{time_str}")
+
+
+ def get_current_time(fmt: str = '%Y-%m-%d %H:%M:%S'):
+     """
+     Return the current time; the return type depends on whether a format is given.
+     :param fmt: format string, e.g. "%Y-%m-%d %H:%M:%S"
+     :return: datetime or str
+     """
+     dt = datetime.now()
+     if fmt is not None:
+         return dt.strftime(fmt)
+     return dt
+
+
+ def time_diff_seconds(start_time: str, end_time: str, fmt: str = '%Y-%m-%d %H:%M:%S'):
+     """
+     Compute the difference in seconds between two time strings.
+
+     :param start_time: start time string
+     :param end_time: end time string
+     :param fmt: time format, default '%Y-%m-%d %H:%M:%S'
+     :return: difference in seconds (integer)
+     """
+     start = datetime.strptime(start_time, fmt)
+     end = datetime.strptime(end_time, fmt)
      delta = end - start
-     seconds = delta.total_seconds()
-     return int(seconds)
+     return int(delta.total_seconds())
+
+
+ TimeType = Union[str, datetime]
+
+
+ def time_diff(start: TimeType, end: TimeType, fmt: str = None, unit='seconds', auto_parse=True) -> Optional[int]:
+     """
+     Compute the difference between two times (strings or datetimes).
+
+     :param start: start time (str or datetime)
+     :param end: end time (str or datetime)
+     :param fmt: time format (required for strings when auto_parse=False)
+     :param unit: unit (seconds, minutes, hours, days)
+     :param auto_parse: try to parse arbitrary string formats automatically (recommended)
+     :return: integer difference in the given unit, or None on failure
+     """
+
+     def ensure_datetime(t):
+         if isinstance(t, datetime):
+             return t
+         elif isinstance(t, str):
+             if auto_parse:
+                 parsed = normalize_time(t)
+                 if parsed:
+                     return parsed
+             if fmt:
+                 return datetime.strptime(t, fmt)
+             raise ValueError("字符串时间未提供格式,或无法自动解析")
+         else:
+             raise TypeError(f"不支持的时间类型: {type(t)}")
+
+     start_dt = ensure_datetime(start)
+     end_dt = ensure_datetime(end)
+
+     delta = (end_dt - start_dt)
+     abs_seconds = int(abs(delta.total_seconds()))
+
+     if unit == 'seconds':
+         return abs_seconds
+     elif unit == 'minutes':
+         return abs_seconds // 60
+     elif unit == 'hours':
+         return abs_seconds // 3600
+     elif unit == 'days':
+         return abs_seconds // 86400
+     else:
+         raise ValueError(f"Unsupported unit: {unit}")
+
+
+ def format_datetime(dt, fmt="%Y-%m-%d %H:%M:%S"):
+     """Format a datetime as a string"""
+     return dt.strftime(fmt)
+
+
+ def parse_datetime(s, fmt="%Y-%m-%d %H:%M:%S"):
+     """Parse a string into a datetime object"""
+     return datetime.strptime(s, fmt)
+
+
+ def datetime_to_timestamp(dt):
+     """Convert a datetime to a Unix timestamp"""
+     return dt.timestamp()
+
+
+ def timestamp_to_datetime(ts):
+     """Convert a Unix timestamp to a datetime"""
+     return datetime.fromtimestamp(ts)
+
+
+ def add_days(dt, days=0):
+     """Add or subtract days"""
+     return dt + timedelta(days=days)
+
+
+ def add_months(dt, months=0):
+     """Add or subtract months"""
+     return dt + relativedelta(months=months)
+
+
+ def days_between(dt1, dt2):
+     """Number of days between two dates"""
+     return abs((dt2 - dt1).days)
+
+
+ def is_leap_year(year):
+     """Check whether a year is a leap year"""
+     return (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
+
+
+ def parse_relative_time(time_str: str) -> str:
+     """
+     Parse a relative time string (e.g. "3分钟前", "昨天") and return it as an ISO string.
+     """
+     dt = dateparser.parse(time_str)
+     return dt.isoformat()
+
+
+ if __name__ == '__main__':
+     print(normalize_time(parse_relative_time("30分钟前")))
+     print(parse_relative_time("昨天"))
+     print(parse_relative_time("10小时前"))
+     print(parse_relative_time("1个月前"))
+     print(parse_relative_time("10天前"))
+     print(parse_relative_time("2024年1月1日"))
+     print(parse_relative_time('2025年5月30日'))
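
For orientation, here is a minimal usage sketch of the time helpers added above (not part of the diff; it assumes the functions are imported from wherever this utils module lives, since the diff viewer does not show the file name for this hunk):

    # Sketch only: names come from the hunk above; the import location is assumed.
    dt = normalize_time("2023-01-01 12:00:00")          # -> datetime(2023, 1, 1, 12, 0)
    print(get_current_time())                            # e.g. '2025-07-08 08:55:00'
    print(time_diff("2023-01-01 00:00:00", "2023-01-02 06:00:00", unit='hours'))  # 30
    print(add_months(dt, months=2).date())               # 2023-03-01
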
@@ -1,22 +1,82 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from typing import Callable
+ # -*- coding: UTF-8 -*-
+ from typing import Union, AsyncGenerator, Generator
  from inspect import isgenerator, isasyncgen
+ from crawlo import Response, Request, Item
  from crawlo.exceptions import TransformTypeError

+ T = Union[Request, Item]
+
+
+ async def transform(
+     func: Union[Generator[T, None, None], AsyncGenerator[T, None]],
+     response: Response
+ ) -> AsyncGenerator[Union[T, Exception], None]:
+     """
+     Normalize a callback's output into a single async generator.
+
+     Args:
+         func: a sync or async generator
+         response: the current Response object
+
+     Yields:
+         Union[T, Exception]: Request/Item objects, or an exception object
+
+     Raises:
+         TransformTypeError: if the input is not of the expected type
+     """
+
+     def _set_meta(obj: T) -> T:
+         """Propagate the request depth metadata"""
+         if isinstance(obj, Request):
+             obj.meta.setdefault('depth', response.meta.get('depth', 0))
+         return obj
+
+     # Type check up front
+     if not (isgenerator(func) or isasyncgen(func)):
+         raise TransformTypeError(
+             f'Callback must return generator or async generator, got {type(func).__name__}'
+         )

- async def transform(func: Callable):
      try:
          if isgenerator(func):
-             for f in func:
-                 yield f
-         elif isasyncgen(func):
-             async for f in func:
-                 yield f
+             # Sync generator
+             for item in func:
+                 yield _set_meta(item)
          else:
-             raise TransformTypeError(
-                 f'callback return type error: {type(func)} must be `generator` or `async generator`'
-             )
-     except Exception as exp:
-         yield exp
+             # Async generator
+             async for item in func:
+                 yield _set_meta(item)
+
+     except Exception as e:
+         yield e
+
+ # #!/usr/bin/python
+ # # -*- coding:UTF-8 -*-
+ # from typing import Callable, Union
+ # from inspect import isgenerator, isasyncgen
+ # from crawlo import Response, Request, Item
+ # from crawlo.exceptions import TransformTypeError
+ #
+ #
+ # T = Union[Request, Item]
+ #
+ #
+ # async def transform(func: Callable, response: Response):
+ #     def set_request(t: T) -> T:
+ #         if isinstance(t, Request):
+ #             t.meta['depth'] = response.meta['depth']
+ #         return t
+ #     try:
+ #         if isgenerator(func):
+ #             for f in func:
+ #                 yield set_request(f)
+ #         elif isasyncgen(func):
+ #             async for f in func:
+ #                 yield set_request(f)
+ #         else:
+ #             raise TransformTypeError(
+ #                 f'callback return type error: {type(func)} must be `generator` or `async generator`'
+ #             )
+ #     except Exception as exp:
+ #         yield exp

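
As a rough illustration of how an engine might consume the reworked transform() helper (not part of the diff; the spider callback and error handling shown here are hypothetical):

    # Sketch only: `spider.parse(response)` stands in for any crawlo callback
    # that yields Request or Item objects; `response` is the current Response.
    async def handle_callback(spider, response):
        async for output in transform(spider.parse(response), response):
            if isinstance(output, Exception):
                ...  # route to error handling
            else:
                ...  # schedule the Request or process the Item
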
crawlo/utils/pqueue.py CHANGED
@@ -1,16 +1,174 @@
- #!/usr/bin/python
  # -*- coding:UTF-8 -*-
+ import sys
  import asyncio
- from asyncio import PriorityQueue, TimeoutError
+ import warnings
+ from urllib.parse import urlparse
+ from asyncio import PriorityQueue
+ from redis.asyncio import from_url
+ from typing import Any, Optional, Dict, Annotated
+ from pydantic import (
+     BaseModel,
+     Field,
+     model_validator
+ )
+
+ from crawlo import Request
+ from crawlo.settings.default_settings import REDIS_URL


  class SpiderPriorityQueue(PriorityQueue):
-     def __init__(self, maxsize=0):
-         super(SpiderPriorityQueue, self).__init__(maxsize=maxsize)
+     """Async priority queue with a timeout-aware get()"""
+
+     def __init__(self, maxsize: int = 0) -> None:
+         """Initialize the queue; maxsize=0 means unbounded"""
+         super().__init__(maxsize)
+
+     async def get(self, timeout: float = 0.1) -> Optional[Request]:
+         """
+         Get an element from the queue asynchronously, with a timeout.
+
+         Args:
+             timeout: timeout in seconds, default 0.1

-     async def get(self):
-         fut = super().get()
+         Returns:
+             the queued element (a (priority, ...) tuple) or None on timeout
+         """
          try:
-             return await asyncio.wait_for(fut, timeout=0.1)
-         except TimeoutError:
+             # Choose the timeout implementation based on the Python version
+             if sys.version_info >= (3, 11):
+                 async with asyncio.timeout(timeout):
+                     return await super().get()
+             else:
+                 return await asyncio.wait_for(super().get(), timeout=timeout)
+         except asyncio.TimeoutError:
              return None
+
+
+ class TaskModel(BaseModel):
+     """Spider task data model (Pydantic V2 compatible)"""
+     url: Annotated[str, Field(min_length=1, max_length=2000, examples=["https://example.com"])]
+     meta: Dict[str, Any] = Field(default_factory=dict)
+     priority: Annotated[int, Field(default=0, ge=0, le=10, description="0=最高优先级")]
+
+     @classmethod
+     def validate_url(cls, v: str) -> str:
+         """Validate the URL format"""
+         if not v.startswith(('http://', 'https://')):
+             raise ValueError('URL必须以 http:// 或 https:// 开头')
+
+         parsed = urlparse(v)
+         if not parsed.netloc:
+             raise ValueError('URL缺少有效域名')
+
+         return v.strip()
+
+     @model_validator(mode='after')
+     def validate_priority_logic(self) -> 'TaskModel':
+         """Cross-field validation example"""
+         if 'admin' in self.url and self.priority > 5:
+             self.priority = 5  # automatically cap the priority of admin pages
+         return self
+
+
+ class DistributedPriorityQueue:
+     def __init__(
+         self,
+         redis_url: str,
+         queue_name: str = "spider_queue",
+         max_connections: int = 10,
+         health_check_interval: int = 30
+     ):
+         """
+         Args:
+             redis_url: redis://[:password]@host:port[/db]
+             queue_name: name of the Redis sorted-set key
+             max_connections: connection pool size
+             health_check_interval: connection health-check interval (seconds)
+         """
+         self.redis = from_url(
+             redis_url,
+             max_connections=max_connections,
+             health_check_interval=health_check_interval,
+             socket_keepalive=True,
+             decode_responses=True
+         )
+         self.queue_name = queue_name
+
+     async def put(self, task: TaskModel) -> bool:
+         """
+         Add a task to the queue (serialized with Pydantic V2's model_dump_json).
+
+         Args:
+             task: a validated TaskModel instance
+
+         Returns:
+             bool: whether the task was added (Redis ZADD returns the number added)
+         """
+         with warnings.catch_warnings():
+             warnings.simplefilter("ignore", category=DeprecationWarning)
+             task_str = task.model_dump_json()  # Pydantic V2 serialization
+         return await self.redis.zadd(
+             self.queue_name,
+             {task_str: task.priority}
+         ) > 0
+
+     async def get(self, timeout: float = 1.0) -> Optional[TaskModel]:
+         """
+         Pop the highest-priority task (validated on read).
+
+         Args:
+             timeout: blocking timeout in seconds
+
+         Returns:
+             a TaskModel instance, or None (timeout / empty queue)
+         """
+         try:
+             result = await self.redis.bzpopmax(
+                 self.queue_name,
+                 timeout=timeout
+             )
+             if result:
+                 _, task_str, _ = result
+                 with warnings.catch_warnings():
+                     warnings.simplefilter("ignore", category=DeprecationWarning)
+                     return TaskModel.model_validate_json(task_str)  # Pydantic V2 deserialization
+         except Exception as e:
+             print(f"任务获取失败: {type(e).__name__}: {e}")
+         return None
+
+     async def aclose(self):
+         """Close the Redis connection safely"""
+         await self.redis.aclose()
+
+     async def __aenter__(self):
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         await self.aclose()
+
+
+ # Usage example
+ async def demo():
+     async with DistributedPriorityQueue(
+         REDIS_URL,
+         max_connections=20,
+         health_check_interval=10
+     ) as queue:
+         # Add a task (validation runs automatically)
+         task = TaskModel(
+             url="https://example.com/1",
+             priority=1,
+             meta={"depth": 2}
+         )
+
+         if await queue.put(task):
+             print(f"任务添加成功: {task.url}")
+
+         # Fetch a task
+         if result := await queue.get(timeout=2.0):
+             print(f"获取任务: {result.url} (优先级={result.priority})")
+             print(f"元数据: {result.meta}")
+
+
+ if __name__ == "__main__":
+     asyncio.run(demo())
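
For the in-process queue, a minimal sketch of the new timeout-aware get() (not part of the diff; the (priority, payload) tuple shape is an assumption about how callers enqueue items):

    # Sketch only: assumes items are enqueued as (priority, payload) tuples.
    async def drain(queue: SpiderPriorityQueue):
        await queue.put((0, "high-priority request"))
        while (item := await queue.get(timeout=0.5)) is not None:
            priority, payload = item
            print(priority, payload)
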
crawlo/utils/project.py CHANGED
@@ -17,8 +17,9 @@ def _get_closest(path='.'):
  def _init_env():
      closest = _get_closest()
      if closest:
-         project_dir = os.path.dirname(closest)
-         sys.path.append(project_dir)
+         sys.path.append(closest)
+         # project_dir = os.path.dirname(closest)
+         # sys.path.append(project_dir)


  def get_settings(settings='settings'):
@@ -0,0 +1,85 @@
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ # @Time : 2025-07-08 08:55
+ # @Author : crawl-coder
+ # @Desc : None
+ """
+ import json
+ import hashlib
+ from typing import Any, Optional, Iterable, Union
+ from w3lib.url import canonicalize_url
+
+ from crawlo import Request
+
+
+ def to_bytes(data: Any, encoding='utf-8') -> bytes:
+     """
+     Convert values of various types to bytes.
+     Supports str, bytes, dict, None, and anything convertible to str.
+     """
+     if isinstance(data, bytes):
+         return data
+     if isinstance(data, str):
+         return data.encode(encoding)
+     if isinstance(data, dict):
+         return json.dumps(data, sort_keys=True, ensure_ascii=False).encode(encoding)
+     if data is None:
+         return b''
+     return str(data).encode(encoding)
+
+
+ def request_fingerprint(
+     request: Request,
+     include_headers: Optional[Iterable[Union[bytes, str]]] = None
+ ) -> str:
+     """
+     Build a request fingerprint from the method, canonicalized URL, body,
+     and optionally selected headers. Uses SHA256 for better security.
+
+     :param request: Request object (must have method, url, body, headers)
+     :param include_headers: header names (str or bytes) to include in the fingerprint
+     :return: request fingerprint (hex string)
+     """
+     hash_func = hashlib.sha256()
+
+     # Base fields
+     hash_func.update(to_bytes(request.method))
+     hash_func.update(to_bytes(canonicalize_url(request.url)))
+     hash_func.update(request.body or b'')
+
+     # Headers
+     if include_headers:
+         headers = request.headers  # assumed to be a dict-like or MultiDict structure
+         for header_name in include_headers:
+             name_bytes = to_bytes(header_name).lower()  # lowercase for consistent matching
+             value = b''
+
+             # Support different header container types (MultiDict or plain dict)
+             if hasattr(headers, 'get_all'):
+                 # e.g. scrapy.http.Headers.get_all()
+                 values = headers.get_all(name_bytes)
+                 value = b';'.join(values) if values else b''
+             elif hasattr(headers, '__getitem__'):
+                 # plain dict
+                 try:
+                     raw_value = headers[name_bytes]
+                     if isinstance(raw_value, list):
+                         value = b';'.join(to_bytes(v) for v in raw_value)
+                     else:
+                         value = to_bytes(raw_value)
+                 except (KeyError, TypeError):
+                     value = b''
+             else:
+                 value = b''
+
+             hash_func.update(name_bytes + b':' + value)
+
+     return hash_func.hexdigest()
+
+
+ def set_request(request: Request, priority: int) -> None:
+     request.meta['depth'] = request.meta.setdefault('depth', 0) + 1
+     if priority:
+         request.priority -= request.meta['depth'] * priority
+
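
A hedged usage sketch for the new fingerprint helper (not part of the diff; `request` is assumed to be an existing crawlo Request with method, url, body, and headers, as the docstring requires):

    # Sketch only: `request` is an existing crawlo Request instance.
    fp = request_fingerprint(request, include_headers=["User-Agent", "Cookie"])
    print(fp)  # 64-character hex digest (SHA256)
    # Because of canonicalize_url, equivalent URLs such as "http://example.com/?b=2&a=1"
    # and "http://example.com/?a=1&b=2" produce the same fingerprint.
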
crawlo/utils/url.py ADDED
@@ -0,0 +1,40 @@
+ from urllib.parse import urldefrag
+ from w3lib.url import add_or_replace_parameter
+
+
+ def escape_ajax(url: str) -> str:
+     """
+     Convert a URL per the Google AJAX crawling scheme (handles #! fragments):
+     https://developers.google.com/webmasters/ajax-crawling/docs/getting-started
+
+     Rules:
+     1. Only URLs containing `#!` are converted (marking an AJAX-crawlable page)
+     2. `#!key=value` becomes `?_escaped_fragment_=key%3Dvalue`
+     3. Existing query parameters are preserved
+
+     Examples:
+     >>> escape_ajax("www.example.com/ajax.html#!key=value")
+     'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
+     >>> escape_ajax("www.example.com/ajax.html?k1=v1#!key=value")
+     'www.example.com/ajax.html?k1=v1&_escaped_fragment_=key%3Dvalue'
+     >>> escape_ajax("www.example.com/ajax.html#!")
+     'www.example.com/ajax.html?_escaped_fragment_='
+
+     URLs that are not AJAX-crawlable (no #!) are returned unchanged:
+     >>> escape_ajax("www.example.com/ajax.html#normal")
+     'www.example.com/ajax.html#normal'
+     """
+     # Split the URL into base and fragment
+     de_frag, frag = urldefrag(url)
+
+     # Only handle fragments starting with "!" (Google scheme)
+     if not frag.startswith("!"):
+         return url  # not AJAX-crawlable, return unchanged
+
+     # Add the `_escaped_fragment_` parameter
+     return add_or_replace_parameter(de_frag, "_escaped_fragment_", frag[1:])
+
+
+ if __name__ == '__main__':
+     f = escape_ajax('http://example.com/page#!')
+     print(f)
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: crawlo
- Version: 1.0.0
- Summary: feapder是一款支持异步的python爬虫框架
+ Version: 1.0.1
+ Summary: Crawlo是一款支持异步的python爬虫框架
  Home-page: https://github.com/crawl-coder/Crawlo.git
  Author: crawl-coder
  Author-email: crawlo@qq.com