crawlo 1.0.2__py3-none-any.whl → 1.0.4__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of crawlo might be problematic.
Files changed (79)
  1. crawlo/__init__.py +9 -6
  2. crawlo/__version__.py +1 -2
  3. crawlo/core/__init__.py +2 -2
  4. crawlo/core/engine.py +158 -158
  5. crawlo/core/processor.py +40 -40
  6. crawlo/core/scheduler.py +57 -59
  7. crawlo/crawler.py +242 -222
  8. crawlo/downloader/__init__.py +78 -78
  9. crawlo/downloader/aiohttp_downloader.py +259 -96
  10. crawlo/downloader/httpx_downloader.py +187 -48
  11. crawlo/downloader/playwright_downloader.py +160 -160
  12. crawlo/event.py +11 -11
  13. crawlo/exceptions.py +64 -64
  14. crawlo/extension/__init__.py +31 -31
  15. crawlo/extension/log_interval.py +49 -49
  16. crawlo/extension/log_stats.py +44 -44
  17. crawlo/filters/__init__.py +37 -37
  18. crawlo/filters/aioredis_filter.py +150 -130
  19. crawlo/filters/memory_filter.py +202 -203
  20. crawlo/items/__init__.py +62 -62
  21. crawlo/items/items.py +118 -118
  22. crawlo/middleware/__init__.py +21 -21
  23. crawlo/middleware/default_header.py +32 -32
  24. crawlo/middleware/download_delay.py +28 -28
  25. crawlo/middleware/middleware_manager.py +140 -140
  26. crawlo/middleware/request_ignore.py +30 -30
  27. crawlo/middleware/response_code.py +18 -18
  28. crawlo/middleware/response_filter.py +26 -26
  29. crawlo/middleware/retry.py +90 -90
  30. crawlo/network/__init__.py +7 -7
  31. crawlo/network/request.py +204 -233
  32. crawlo/network/response.py +166 -162
  33. crawlo/pipelines/__init__.py +13 -13
  34. crawlo/pipelines/console_pipeline.py +39 -39
  35. crawlo/pipelines/mongo_pipeline.py +116 -116
  36. crawlo/pipelines/mysql_batch_pipline.py +133 -133
  37. crawlo/pipelines/mysql_pipeline.py +195 -195
  38. crawlo/pipelines/pipeline_manager.py +56 -56
  39. crawlo/settings/__init__.py +7 -7
  40. crawlo/settings/default_settings.py +94 -89
  41. crawlo/settings/setting_manager.py +99 -99
  42. crawlo/spider/__init__.py +36 -36
  43. crawlo/stats_collector.py +59 -47
  44. crawlo/subscriber.py +106 -106
  45. crawlo/task_manager.py +27 -27
  46. crawlo/templates/item_template.tmpl +21 -21
  47. crawlo/templates/project_template/main.py +32 -32
  48. crawlo/templates/project_template/setting.py +189 -189
  49. crawlo/templates/spider_template.tmpl +30 -30
  50. crawlo/utils/__init__.py +7 -7
  51. crawlo/utils/concurrency_manager.py +124 -124
  52. crawlo/utils/date_tools.py +177 -177
  53. crawlo/utils/func_tools.py +82 -82
  54. crawlo/utils/log.py +39 -39
  55. crawlo/utils/pqueue.py +173 -173
  56. crawlo/utils/project.py +59 -59
  57. crawlo/utils/request.py +122 -85
  58. crawlo/utils/system.py +11 -11
  59. crawlo/utils/tools.py +302 -302
  60. crawlo/utils/url.py +39 -39
  61. {crawlo-1.0.2.dist-info → crawlo-1.0.4.dist-info}/METADATA +48 -48
  62. crawlo-1.0.4.dist-info/RECORD +79 -0
  63. {crawlo-1.0.2.dist-info → crawlo-1.0.4.dist-info}/top_level.txt +1 -0
  64. tests/__init__.py +7 -0
  65. tests/baidu_spider/__init__.py +7 -0
  66. tests/baidu_spider/demo.py +94 -0
  67. tests/baidu_spider/items.py +25 -0
  68. tests/baidu_spider/middleware.py +49 -0
  69. tests/baidu_spider/pipeline.py +55 -0
  70. tests/baidu_spider/request_fingerprints.txt +9 -0
  71. tests/baidu_spider/run.py +27 -0
  72. tests/baidu_spider/settings.py +80 -0
  73. tests/baidu_spider/spiders/__init__.py +7 -0
  74. tests/baidu_spider/spiders/bai_du.py +61 -0
  75. tests/baidu_spider/spiders/sina.py +79 -0
  76. crawlo/filters/redis_filter.py +0 -120
  77. crawlo-1.0.2.dist-info/RECORD +0 -68
  78. {crawlo-1.0.2.dist-info → crawlo-1.0.4.dist-info}/WHEEL +0 -0
  79. {crawlo-1.0.2.dist-info → crawlo-1.0.4.dist-info}/entry_points.txt +0 -0
crawlo/settings/default_settings.py CHANGED
@@ -1,89 +1,94 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- # Default project name
- import os
-
- PROJECT_NAME = 'crawlo'
- VERSION = 1.0
- # Concurrency
- CONCURRENCY = 8
-
- # Download timeout
- DOWNLOAD_TIMEOUT = 60
-
- INTERVAL = 5
-
- # --------------------------------------------------- delay ------------------------------------------------------------
- # Download delay, disabled by default
- DOWNLOAD_DELAY = 0
- # Download delay range
- RANDOM_RANGE = (0.75, 1.25)
- # Whether to randomize the delay
- RANDOMNESS = True
-
- # --------------------------------------------------- retry ------------------------------------------------------------
- MAX_RETRY_TIMES = 2
- IGNORE_HTTP_CODES = [403, 404]
- RETRY_HTTP_CODES = [408, 429, 500, 502, 503, 504, 522, 524]
- # Status codes allowed through
- ALLOWED_CODES = []
- # Request priority settings
- RETRY_PRIORITY = -1
- #
- DEPTH_PRIORITY = 1
-
- STATS_DUMP = True
- # SSL verification
- VERIFY_SSL = True
- # Whether to reuse a single session
- USE_SESSION = True
- # Log level
- LOG_LEVEL = 'DEBUG'
- # Downloader selection
- DOWNLOADER = "crawlo.downloader.aiohttp_downloader.AioHttpDownloader"  # HttpXDownloader
-
- EXTENSIONS = []
-
- # --------------------------------------------------- shared MySQL config -----------------------------------------------------
- MYSQL_HOST = '127.0.0.1'
- MYSQL_PORT = 3306
- MYSQL_USER = 'scrapy_user'
- MYSQL_PASSWORD = 'your_password'
- MYSQL_DB = 'scrapy_data'
- MYSQL_TABLE = 'crawled_data'
-
- # asyncmy-specific config
- MYSQL_POOL_MIN = 5  # minimum pool connections
- MYSQL_POOL_MAX = 20  # maximum pool connections
- MYSQL_ECHO = False
-
- # Batch insert size
- MYSQL_BATCH_SIZE = 100
-
- # --------------------------------------------------- MongoDB base config -----------------------------------------------------
- MONGO_URI = 'mongodb://user:password@host:27017'
- MONGO_DATABASE = 'scrapy_data'
- MONGO_COLLECTION = 'crawled_items'  # optional; defaults to the spider name
-
- # Connection pool tuning (only needed for option 2)
- MONGO_MAX_POOL_SIZE = 200  # maximum connections
- MONGO_MIN_POOL_SIZE = 20  # minimum connections kept open
-
- # Enabled pipelines
- PIPELINES = [
-     'crawlo.pipelines.console_pipeline.ConsolePipeline',
- ]
-
- # filter
- REQUEST_DIR = '.'
- FILTER_DEBUG = True
- FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
-
- # redis filter
- CLEANUP_FP = False
- DECODE_RESPONSES = True
- REDIS_KEY = 'request_fingerprint'
- REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
- REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', 'oscar&0503')
- REDIS_PORT = os.getenv('REDIS_PORT', 6379)
- REDIS_URL = f'redis://:{REDIS_PASSWORD or ""}@{REDIS_HOST}:{REDIS_PORT}/0'
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ # Default project name
+ import os
+
+ PROJECT_NAME = 'crawlo'
+ VERSION = 1.0
+ # Concurrency
+ CONCURRENCY = 8
+
+ # Download timeout
+ DOWNLOAD_TIMEOUT = 60
+
+ INTERVAL = 5
+
+ # --------------------------------------------------- delay ------------------------------------------------------------
+ # Download delay, disabled by default
+ DOWNLOAD_DELAY = 0
+ # Download delay range
+ RANDOM_RANGE = (0.75, 1.25)
+ # Whether to randomize the delay
+ RANDOMNESS = True
+
+ # --------------------------------------------------- retry ------------------------------------------------------------
+ MAX_RETRY_TIMES = 2
+ IGNORE_HTTP_CODES = [403, 404]
+ RETRY_HTTP_CODES = [408, 429, 500, 502, 503, 504, 522, 524]
+ # Status codes allowed through
+ ALLOWED_CODES = []
+ # Request priority settings
+ RETRY_PRIORITY = -1
+ #
+ DEPTH_PRIORITY = 1
+
+ STATS_DUMP = True
+ # SSL verification
+ VERIFY_SSL = True
+ # Whether to reuse a single session
+ USE_SESSION = True
+ # Log level
+ LOG_LEVEL = 'DEBUG'
+ # Downloader selection
+ DOWNLOADER = "crawlo.downloader.aiohttp_downloader.AioHttpDownloader"  # HttpXDownloader
+
+ # --------------------------------------------------- shared MySQL config -----------------------------------------------------
+ MYSQL_HOST = '127.0.0.1'
+ MYSQL_PORT = 3306
+ MYSQL_USER = 'scrapy_user'
+ MYSQL_PASSWORD = 'your_password'
+ MYSQL_DB = 'scrapy_data'
+ MYSQL_TABLE = 'crawled_data'
+
+ # asyncmy-specific config
+ MYSQL_POOL_MIN = 5  # minimum pool connections
+ MYSQL_POOL_MAX = 20  # maximum pool connections
+ MYSQL_ECHO = False
+
+ # Batch insert size
+ MYSQL_BATCH_SIZE = 100
+
+ # --------------------------------------------------- MongoDB base config -----------------------------------------------------
+ MONGO_URI = 'mongodb://user:password@host:27017'
+ MONGO_DATABASE = 'scrapy_data'
+ MONGO_COLLECTION = 'crawled_items'  # optional; defaults to the spider name
+
+ # Connection pool tuning (only needed for option 2)
+ MONGO_MAX_POOL_SIZE = 200  # maximum connections
+ MONGO_MIN_POOL_SIZE = 20  # minimum connections kept open
+
+ # Enabled pipelines
+ PIPELINES = [
+     'crawlo.pipelines.console_pipeline.ConsolePipeline',
+ ]
+
+
+ EXTENSIONS = [
+     'crawlo.extension.log_interval.LogIntervalExtension',
+     'crawlo.extension.log_stats.LogStats'
+ ]
+
+ # filter
+ REQUEST_DIR = '.'
+ FILTER_DEBUG = True
+ FILTER_CLASS = 'crawlo.filters.memory_filter.MemoryFilter'
+
+ # redis filter
+ REDIS_TTL = 0
+ CLEANUP_FP = 0
+ DECODE_RESPONSES = True
+ REDIS_KEY = 'request_fingerprint'
+ REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
+ REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', 'oscar&0503')
+ REDIS_PORT = os.getenv('REDIS_PORT', 6379)
+ REDIS_URL = f'redis://:{REDIS_PASSWORD or ""}@{REDIS_HOST}:{REDIS_PORT}/0'
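The tail of the new settings file composes REDIS_URL from environment variables. A minimal standalone sketch of that composition, with the shipped default password swapped for an empty string (the printed URL is illustrative only):

```python
import os

# Mirrors the REDIS_* pattern in default_settings.py; the real file ships a
# non-empty default password, replaced here with '' for the sketch.
REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '')
REDIS_PORT = os.getenv('REDIS_PORT', 6379)  # str when the env var is set, int otherwise
REDIS_URL = f'redis://:{REDIS_PASSWORD or ""}@{REDIS_HOST}:{REDIS_PORT}/0'

print(REDIS_URL)  # -> redis://:@127.0.0.1:6379/0 with no environment overrides
```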
crawlo/settings/setting_manager.py CHANGED
@@ -1,100 +1,100 @@
- #!/usr/bin/python
- # -*- coding: UTF-8 -*-
- import json
- from copy import deepcopy
- from importlib import import_module
- from collections.abc import MutableMapping
-
- from crawlo.settings import default_settings
-
-
- class SettingManager(MutableMapping):
-
-     def __init__(self, values=None):
-         self.attributes = {}
-         self.set_settings(default_settings)
-         self.update_attributes(values)
-
-     def get(self, key, default=None):
-         """Safely get a value without triggering recursion."""
-         value = self.attributes.get(key, default)
-         return value if value is not None else default
-
-     def get_int(self, key, default=0):
-         return int(self.get(key, default=default))
-
-     def get_float(self, key, default=0.0):
-         return float(self.get(key, default=default))
-
-     def get_bool(self, key, default=False):
-         got = self.get(key, default=default)
-         if isinstance(got, bool):
-             return got
-         if isinstance(got, (int, float)):
-             return bool(got)
-         got_lower = str(got).strip().lower()
-         if got_lower in ('1', 'true'):
-             return True
-         if got_lower in ('0', 'false'):
-             return False
-         raise ValueError(
-             f"Unsupported value for boolean setting: {got}. "
-             "Supported values are: 0/1, True/False, '0'/'1', 'True'/'False' (case-insensitive)."
-         )
-
-     def get_list(self, key, default=None):
-         values = self.get(key, default or [])
-         if isinstance(values, str):
-             return [v.strip() for v in values.split(',') if v.strip()]
-         try:
-             return list(values)
-         except TypeError:
-             return [values]
-
-     def get_dict(self, key, default=None):
-         value = self.get(key, default or {})
-         if isinstance(value, str):
-             value = json.loads(value)
-         try:
-             return dict(value)
-         except TypeError:
-             return value
-
-     def set(self, key, value):
-         self.attributes[key] = value
-
-     def set_settings(self, module):
-         if isinstance(module, str):
-             module = import_module(module)
-         for key in dir(module):
-             if key.isupper():
-                 self.set(key, getattr(module, key))
-
-     # Methods required by MutableMapping
-     def __getitem__(self, item):
-         return self.attributes[item]
-
-     def __setitem__(self, key, value):
-         self.set(key, value)
-
-     def __delitem__(self, key):
-         del self.attributes[key]
-
-     def __iter__(self):
-         return iter(self.attributes)
-
-     def __len__(self):
-         return len(self.attributes)
-
-     def __str__(self):
-         return f'<Settings: {self.attributes}>'
-
-     __repr__ = __str__
-
-     def update_attributes(self, attributes):
-         if attributes is not None:
-             for key, value in attributes.items():
-                 self.set(key, value)
-
-     def copy(self):
+ #!/usr/bin/python
+ # -*- coding: UTF-8 -*-
+ import json
+ from copy import deepcopy
+ from importlib import import_module
+ from collections.abc import MutableMapping
+
+ from crawlo.settings import default_settings
+
+
+ class SettingManager(MutableMapping):
+
+     def __init__(self, values=None):
+         self.attributes = {}
+         self.set_settings(default_settings)
+         self.update_attributes(values)
+
+     def get(self, key, default=None):
+         """Safely get a value without triggering recursion."""
+         value = self.attributes.get(key, default)
+         return value if value is not None else default
+
+     def get_int(self, key, default=0):
+         return int(self.get(key, default=default))
+
+     def get_float(self, key, default=0.0):
+         return float(self.get(key, default=default))
+
+     def get_bool(self, key, default=False):
+         got = self.get(key, default=default)
+         if isinstance(got, bool):
+             return got
+         if isinstance(got, (int, float)):
+             return bool(got)
+         got_lower = str(got).strip().lower()
+         if got_lower in ('1', 'true'):
+             return True
+         if got_lower in ('0', 'false'):
+             return False
+         raise ValueError(
+             f"Unsupported value for boolean setting: {got}. "
+             "Supported values are: 0/1, True/False, '0'/'1', 'True'/'False' (case-insensitive)."
+         )
+
+     def get_list(self, key, default=None):
+         values = self.get(key, default or [])
+         if isinstance(values, str):
+             return [v.strip() for v in values.split(',') if v.strip()]
+         try:
+             return list(values)
+         except TypeError:
+             return [values]
+
+     def get_dict(self, key, default=None):
+         value = self.get(key, default or {})
+         if isinstance(value, str):
+             value = json.loads(value)
+         try:
+             return dict(value)
+         except TypeError:
+             return value
+
+     def set(self, key, value):
+         self.attributes[key] = value
+
+     def set_settings(self, module):
+         if isinstance(module, str):
+             module = import_module(module)
+         for key in dir(module):
+             if key.isupper():
+                 self.set(key, getattr(module, key))
+
+     # Methods required by MutableMapping
+     def __getitem__(self, item):
+         return self.attributes[item]
+
+     def __setitem__(self, key, value):
+         self.set(key, value)
+
+     def __delitem__(self, key):
+         del self.attributes[key]
+
+     def __iter__(self):
+         return iter(self.attributes)
+
+     def __len__(self):
+         return len(self.attributes)
+
+     def __str__(self):
+         return f'<Settings: {self.attributes}>'
+
+     __repr__ = __str__
+
+     def update_attributes(self, attributes):
+         if attributes is not None:
+             for key, value in attributes.items():
+                 self.set(key, value)
+
+     def copy(self):
          return deepcopy(self)
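A short usage sketch of SettingManager as defined above; the override values are made up for illustration:

```python
from crawlo.settings.setting_manager import SettingManager

# Defaults are loaded from crawlo.settings.default_settings in __init__,
# then the dict passed here overrides individual keys.
settings = SettingManager({'CONCURRENCY': '16', 'STATS_DUMP': 'false'})

print(settings.get_int('CONCURRENCY'))  # 16    -- string override coerced to int
print(settings.get_bool('STATS_DUMP'))  # False -- 'false' accepted case-insensitively
print(settings.get_list('PIPELINES'))   # ['crawlo.pipelines.console_pipeline.ConsolePipeline']
settings['LOG_LEVEL'] = 'INFO'          # MutableMapping interface delegates to set()
```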
crawlo/spider/__init__.py CHANGED
@@ -1,36 +1,36 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- from crawlo import Request
-
-
- class Spider(object):
-     def __init__(self):
-         if not hasattr(self, 'start_urls'):
-             self.start_urls = []
-         self.crawler = None
-
-     @classmethod
-     def create_instance(cls, crawler):
-         o = cls()
-         o.crawler = crawler
-         return o
-
-     def start_requests(self):
-         if self.start_urls:
-             for url in self.start_urls:
-                 yield Request(url=url, dont_filter=True)
-         else:
-             if hasattr(self, 'start_url') and isinstance(getattr(self, 'start_url'), str):
-                 yield Request(getattr(self, 'start_url'), dont_filter=True)
-
-     def parse(self, response):
-         raise NotImplementedError
-
-     async def spider_opened(self):
-         pass
-
-     async def spider_closed(self):
-         pass
-
-     def __str__(self):
-         return self.__class__.__name__
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ from crawlo import Request
+
+
+ class Spider(object):
+     def __init__(self):
+         if not hasattr(self, 'start_urls'):
+             self.start_urls = []
+         self.crawler = None
+
+     @classmethod
+     def create_instance(cls, crawler):
+         o = cls()
+         o.crawler = crawler
+         return o
+
+     def start_requests(self):
+         if self.start_urls:
+             for url in self.start_urls:
+                 yield Request(url=url, dont_filter=True)
+         else:
+             if hasattr(self, 'start_url') and isinstance(getattr(self, 'start_url'), str):
+                 yield Request(getattr(self, 'start_url'), dont_filter=True)
+
+     def parse(self, response):
+         raise NotImplementedError
+
+     async def spider_opened(self):
+         pass
+
+     async def spider_closed(self):
+         pass
+
+     def __str__(self):
+         return self.__class__.__name__
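A minimal subclass sketch against the Spider base above; the URL is a placeholder, and both response.url and yielding a plain dict are assumptions for illustration rather than documented crawlo behavior:

```python
from crawlo.spider import Spider


class ExampleSpider(Spider):
    # Picked up by the inherited start_requests(), which yields one
    # Request(url=..., dont_filter=True) per entry.
    start_urls = ['https://example.com']  # placeholder URL

    def parse(self, response):
        # The base parse() raises NotImplementedError, so subclasses must
        # override it. Yielding a dict item is an assumption for this sketch,
        # as is the response.url attribute.
        yield {'url': response.url}
```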
crawlo/stats_collector.py CHANGED
@@ -1,47 +1,59 @@
- #!/usr/bin/python
- # -*- coding:UTF-8 -*-
- """
- # @Time   : 2025-05-17 09:57
- # @Author : crawl-coder
- # @Desc   : Stats collector
- """
- from pprint import pformat
- from crawlo.utils.log import get_logger
-
-
- class StatsCollector(object):
-
-     def __init__(self, crawler):
-         self.crawler = crawler
-         self._dump = self.crawler.settings.get_bool('STATS_DUMP')
-         self._stats = {}
-         self.logger = get_logger(self.__class__.__name__, "INFO")
-
-     def inc_value(self, key, count=1, start=0):
-         self._stats[key] = self._stats.setdefault(key, start) + count
-
-     def get_value(self, key, default=None):
-         return self._stats.get(key, default)
-
-     def get_stats(self):
-         return self._stats
-
-     def set_stats(self, stats):
-         self._stats = stats
-
-     def clear_stats(self):
-         self._stats.clear()
-
-     def close_spider(self, spider_name, reason):
-         self._stats['reason'] = reason
-         if self._dump:
-             self.logger.info(f'{spider_name} stats: \n{pformat(self._stats)}')
-
-     def __getitem__(self, item):
-         return self._stats[item]
-
-     def __setitem__(self, key, value):
-         self._stats[key] = value
-
-     def __delitem__(self, key):
-         del self._stats[key]
+ #!/usr/bin/python
+ # -*- coding:UTF-8 -*-
+ """
+ # @Time   : 2025-05-17 09:57
+ # @Author : crawl-coder
+ # @Desc   : Stats collector
+ """
+ from pprint import pformat
+ from crawlo.utils.log import get_logger
+
+
+ class StatsCollector(object):
+
+     def __init__(self, crawler):
+         self.crawler = crawler
+         self._dump = self.crawler.settings.get_bool('STATS_DUMP')
+         self._stats = {}
+         self.logger = get_logger(self.__class__.__name__, "INFO")
+
+     def inc_value(self, key, count=1, start=0):
+         self._stats[key] = self._stats.setdefault(key, start) + count
+
+     def get_value(self, key, default=None):
+         return self._stats.get(key, default)
+
+     def get_stats(self):
+         return self._stats
+
+     def set_stats(self, stats):
+         self._stats = stats
+
+     def clear_stats(self):
+         self._stats.clear()
+
+     def close_spider(self, spider, reason):
+         self._stats['reason'] = reason
+
+         # First choice: use spider.name
+         # Second choice: use the instance's class name
+         # Last resort: a completely unknown placeholder
+         spider_name = (
+             getattr(spider, 'name', None) or
+             spider.__class__.__name__ or
+             '<Unknown>'
+         )
+
+         self._stats['spider_name'] = spider_name
+
+         if self._dump:
+             self.logger.info(f'{spider_name} stats: \n{pformat(self._stats)}')
+
+     def __getitem__(self, item):
+         return self._stats[item]
+
+     def __setitem__(self, key, value):
+         self._stats[key] = value
+
+     def __delitem__(self, key):
+         del self._stats[key]
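The behavioral change in this file: close_spider() now receives the spider object rather than a pre-resolved name and derives the name itself. A standalone sketch of the same fallback chain (DummySpider is invented for illustration):

```python
class DummySpider:
    name = None  # no explicit spider name configured


spider = DummySpider()

# Same resolution order as the new StatsCollector.close_spider():
spider_name = (
    getattr(spider, 'name', None)   # first choice: spider.name
    or spider.__class__.__name__    # fallback: the class name
    or '<Unknown>'                  # unreachable in practice: __name__ is never empty
)
print(spider_name)  # -> DummySpider
```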