funboost 50.3__py3-none-any.whl → 50.4__py3-none-any.whl
- funboost/__init__.py +1 -1
- funboost/constant.py +4 -0
- funboost/consumers/base_consumer.py +93 -98
- funboost/consumers/celery_consumer.py +1 -1
- funboost/consumers/dramatiq_consumer.py +0 -5
- funboost/consumers/grpc_consumer.py +1 -1
- funboost/consumers/http_consumer.py +1 -1
- funboost/consumers/http_consumer_aiohttp_old.py +1 -1
- funboost/consumers/huey_consumer.py +2 -5
- funboost/consumers/kafka_consumer.py +0 -2
- funboost/consumers/kafka_consumer_manually_commit.py +0 -1
- funboost/consumers/kombu_consumer.py +0 -39
- funboost/consumers/mysql_cdc_consumer.py +1 -3
- funboost/consumers/pulsar_consumer.py +10 -5
- funboost/consumers/rabbitmq_amqpstorm_consumer.py +7 -8
- funboost/consumers/rabbitmq_complex_routing_consumer.py +54 -0
- funboost/consumers/redis_consumer.py +1 -1
- funboost/consumers/redis_consumer_ack_able.py +1 -1
- funboost/consumers/redis_consumer_ack_using_timeout.py +2 -6
- funboost/consumers/redis_consumer_priority.py +1 -1
- funboost/consumers/redis_stream_consumer.py +1 -3
- funboost/consumers/tcp_consumer.py +1 -1
- funboost/consumers/udp_consumer.py +1 -1
- funboost/consumers/zeromq_consumer.py +1 -1
- funboost/contrib/save_function_result_status/__init__.py +0 -0
- funboost/contrib/{save_result_status_to_sqldb.py → save_function_result_status/save_result_status_to_sqldb.py} +8 -41
- funboost/contrib/save_function_result_status/save_result_status_use_dataset.py +47 -0
- funboost/core/booster.py +7 -1
- funboost/core/broker_kind__exclusive_config_default_define.py +229 -0
- funboost/core/funboost_time.py +3 -82
- funboost/core/func_params_model.py +9 -3
- funboost/core/helper_funs.py +2 -2
- funboost/factories/broker_kind__publsiher_consumer_type_map.py +5 -0
- funboost/funboost_config_deafult.py +0 -3
- funboost/function_result_web/templates/fun_result_table.html +1 -1
- funboost/publishers/base_publisher.py +3 -2
- funboost/publishers/rabbitmq_amqpstorm_publisher.py +8 -7
- funboost/publishers/rabbitmq_complex_routing_publisher.py +84 -0
- funboost/utils/redis_manager.py +11 -5
- {funboost-50.3.dist-info → funboost-50.4.dist-info}/METADATA +156 -97
- {funboost-50.3.dist-info → funboost-50.4.dist-info}/RECORD +44 -40
- {funboost-50.3.dist-info → funboost-50.4.dist-info}/WHEEL +1 -1
- funboost-50.3.dist-info/LICENSE +0 -203
- {funboost-50.3.dist-info → funboost-50.4.dist-info}/entry_points.txt +0 -0
- {funboost-50.3.dist-info → funboost-50.4.dist-info}/top_level.txt +0 -0
funboost/consumers/kombu_consumer.py CHANGED

@@ -81,45 +81,6 @@ class KombuConsumer(AbstractConsumer, ):
     Uses kombu as the middleware, which supports many niche brokers in one go, but performance is poor; only use it for broker kinds the distributed function scheduling framework has not implemented natively, and benchmark it yourself.
     """

-    BROKER_EXCLUSIVE_CONFIG_DEFAULT = {'kombu_url': None,  # if kombu_url is also configured here, it takes priority; otherwise funboost_config.KOMBU_URL is used
-                                       'transport_options': {},  # transport_options are kombu's transport_options.
-                                       'prefetch_count': 500
-                                       }
-    # prefetch_count is the number of messages to prefetch
-    ''' transport_options are kombu's transport_options.
-    For example, when kombu uses redis as the middleware, you can set visibility_timeout to decide how long a message may stay un-acked before it is automatically requeued.
-    Which transport_options each broker supports can be found in the transport_options notes in kombu's source code.
-
-    For example, the kombu redis Transport Options description in
-    D:\ProgramData\Miniconda3\envs\py311\Lib\site-packages\kombu\transport\redis.py
-
-    Transport Options
-    =================
-    * ``sep``
-    * ``ack_emulation``: (bool) If set to True transport will
-      simulate Acknowledge of AMQP protocol.
-    * ``unacked_key``
-    * ``unacked_index_key``
-    * ``unacked_mutex_key``
-    * ``unacked_mutex_expire``
-    * ``visibility_timeout``
-    * ``unacked_restore_limit``
-    * ``fanout_prefix``
-    * ``fanout_patterns``
-    * ``global_keyprefix``: (str) The global key prefix to be prepended to all keys
-      used by Kombu
-    * ``socket_timeout``
-    * ``socket_connect_timeout``
-    * ``socket_keepalive``
-    * ``socket_keepalive_options``
-    * ``queue_order_strategy``
-    * ``max_connections``
-    * ``health_check_interval``
-    * ``retry_on_timeout``
-    * ``priority_steps``
-
-
-    '''

     def custom_init(self):
         self.kombu_url = self.consumer_params.broker_exclusive_config['kombu_url'] or BrokerConnConfig.KOMBU_URL
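The kombu defaults removed here now come from the central registry in the new funboost/core/broker_kind__exclusive_config_default_define.py (see further down), and per-queue overrides still go through broker_exclusive_config. A minimal sketch of such an override, assuming a reachable redis and an illustrative queue name:

```python
from funboost import boost, BoosterParams, BrokerEnum

# A sketch: override the registered kombu defaults (kombu_url, transport_options,
# prefetch_count) for one queue. With redis as the kombu transport,
# visibility_timeout requeues messages that stay un-acked for 600 seconds.
@boost(BoosterParams(
    queue_name='test_kombu_queue',  # hypothetical queue name
    broker_kind=BrokerEnum.KOMBU,
    broker_exclusive_config={
        'kombu_url': 'redis://127.0.0.1:6379/0',  # takes priority over funboost_config.KOMBU_URL
        'transport_options': {'visibility_timeout': 600},
        'prefetch_count': 100,
    },
))
def add(x, y):
    print(x + y)

if __name__ == '__main__':
    add.push(1, 2)   # publish a message
    add.consume()    # start consuming
```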
funboost/consumers/mysql_cdc_consumer.py CHANGED

@@ -21,9 +21,7 @@ class MysqlCdcConsumer(AbstractConsumer):
     This broker is consumer-driven; it automatically generates tasks from database changes.
     """

-    BROKER_EXCLUSIVE_CONFIG_DEFAULT = {
-        'BinLogStreamReaderConfig': {},
-    }
+

     def custom_init(self):
         """Validates the essential configuration."""
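The BinLogStreamReaderConfig dict is still passed per queue and, per the registry comment further down, holds the constructor arguments of BinLogStreamReader (presumably python-mysql-replication's). A sketch, with a hypothetical queue name and handler signature; server_id and blocking are standard BinLogStreamReader arguments:

```python
from funboost import boost, BoosterParams, BrokerEnum

# A sketch: the dict under 'BinLogStreamReaderConfig' is assumed to be passed
# through to BinLogStreamReader(**kwargs); server_id identifies this
# replication client to the mysql server.
@boost(BoosterParams(
    queue_name='test_mysql_cdc_queue',  # hypothetical queue name
    broker_kind=BrokerEnum.MYSQL_CDC,
    broker_exclusive_config={'BinLogStreamReaderConfig': {'server_id': 100, 'blocking': True}},
))
def handle_row_change(event):  # hypothetical handler signature
    print(event)

if __name__ == '__main__':
    handle_row_change.consume()  # consumer-driven: tasks come from binlog events
```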
funboost/consumers/pulsar_consumer.py CHANGED

@@ -32,10 +32,7 @@ class PulsarConsumer(AbstractConsumer, ):
     Implemented with pulsar as the middleware.
     """

-    BROKER_EXCLUSIVE_CONFIG_DEFAULT = {'subscription_name': 'funboost_group',
-                                       'replicate_subscription_state_enabled': True,
-                                       'consumer_type': ConsumerType.Shared,
-                                       }
+

     def custom_init(self):
         pass

@@ -46,9 +43,17 @@ class PulsarConsumer(AbstractConsumer, ):
         except ImportError:
             raise ImportError('Users need to pip install pulsar-client themselves,')
         self._client = pulsar.Client(BrokerConnConfig.PULSAR_URL, )
+
+        consumer_type_map = {
+            'Exclusive': ConsumerType.Exclusive,
+            'Shared': ConsumerType.Shared,
+            'Failover': ConsumerType.Failover,
+            'KeyShared': ConsumerType.KeyShared,
+        }
+        consumer_type_obj = consumer_type_map[self.consumer_params.broker_exclusive_config['consumer_type']]
         self._consumer = self._client.subscribe(self._queue_name, schema=schema.StringSchema(), consumer_name=f'funboost_consumer_{os.getpid()}',
                                                 subscription_name=self.consumer_params.broker_exclusive_config['subscription_name'],
-                                                consumer_type=
+                                                consumer_type=consumer_type_obj,
                                                 replicate_subscription_state_enabled=self.consumer_params.broker_exclusive_config['replicate_subscription_state_enabled'])
         while True:
             msg = self._consumer.receive()
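With this change, consumer_type in broker_exclusive_config is a plain string ('Exclusive', 'Shared', 'Failover', 'KeyShared') that the consumer maps to the pulsar ConsumerType enum, so user config stays serializable. A usage sketch with an illustrative queue name:

```python
from funboost import boost, BoosterParams, BrokerEnum

# A sketch: 'consumer_type' is one of the string keys of consumer_type_map
# above; 'KeyShared' routes messages with the same key to the same consumer.
@boost(BoosterParams(
    queue_name='test_pulsar_queue',  # hypothetical queue name
    broker_kind=BrokerEnum.PULSAR,
    broker_exclusive_config={
        'subscription_name': 'funboost_group',
        'consumer_type': 'KeyShared',
    },
))
def process(msg_body):
    print(msg_body)

if __name__ == '__main__':
    process.consume()
```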
funboost/consumers/rabbitmq_amqpstorm_consumer.py CHANGED

@@ -3,10 +3,8 @@
 # @Time : 2022/8/8 0008 13:30

 import amqpstorm
-from funboost.constant import BrokerEnum
 from funboost.consumers.base_consumer import AbstractConsumer
-
-from funboost.core.func_params_model import PublisherParams
+


 class RabbitmqConsumerAmqpStorm(AbstractConsumer):

@@ -14,7 +12,8 @@ class RabbitmqConsumerAmqpStorm:
     Implemented with AmqpStorm; thread-safe across threads, no locking needed.
     funboost strongly recommends this as the message queue middleware.
     """
-
+
+    # _rabbitmq_pb_cls = RabbitmqPublisherUsingAmqpStorm

     def _shedual_task(self):
         # noinspection PyTypeChecker

@@ -24,13 +23,14 @@ class RabbitmqConsumerAmqpStorm:
             kw = {'amqpstorm_message': amqpstorm_message, 'body': body}
             self._submit_task(kw)

-        rp =
-
+        # rp = self._rabbitmq_pb_cls(publisher_params=PublisherParams(queue_name=self.queue_name,broker_kind=self.consumer_params.broker_kind,
+        #                                                             broker_exclusive_config=self.consumer_params.broker_exclusive_config))
+        rp = self.bulid_a_new_publisher_of_same_queue()
         rp.init_broker()
         rp.channel_wrapper_by_ampqstormbaic.qos(self.consumer_params.concurrent_num)
         rp.channel_wrapper_by_ampqstormbaic.consume(callback=callback, queue=self.queue_name, no_ack=self.consumer_params.broker_exclusive_config['no_ack'],
                                                     )
-        self._rp=rp
+        self._rp = rp
         rp.channel.start_consuming(auto_decode=True)

@@ -48,4 +48,3 @@ class RabbitmqConsumerAmqpStorm:
         # kw['amqpstorm_message'].reject(requeue=True)
         # kw['amqpstorm_message'].ack()
         # self.publisher_of_same_queue.publish(kw['body'])
-
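The defaults this consumer reads (queue_durable, x-max-priority, no_ack) are now registered centrally; the registry comment says x-max-priority must be an integer (recommended below 5) and None disables priorities. A sketch of declaring a priority-capable queue, with an illustrative queue name:

```python
from funboost import boost, BoosterParams, BrokerEnum

# A sketch: per the registered defaults, x-max-priority must be a small
# integer; None leaves priority support disabled for the queue.
@boost(BoosterParams(
    queue_name='test_rabbit_priority_queue',  # hypothetical queue name
    broker_kind=BrokerEnum.RABBITMQ_AMQPSTORM,
    broker_exclusive_config={'x-max-priority': 3, 'no_ack': False},
))
def work(item):
    print(item)
```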
funboost/consumers/rabbitmq_complex_routing_consumer.py ADDED

@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# @Author : ydf
+# @Time : 2022/8/8 0008 13:30
+import amqpstorm
+from funboost.consumers.rabbitmq_amqpstorm_consumer import RabbitmqConsumerAmqpStorm
+from amqpstorm.queue import Queue as AmqpStormQueue
+
+
+class RabbitmqComplexRoutingConsumer(RabbitmqConsumerAmqpStorm):
+    """
+
+    """
+    def custom_init(self):
+        super().custom_init()
+        rp = self.bulid_a_new_publisher_of_same_queue()
+        rp.init_broker()  # the publisher side only declares the exchange
+
+        # the consumer is responsible for declaring the queue and the sole binding logic
+        AmqpStormQueue(rp.channel).declare(**rp.queue_declare_params)
+
+        # the consumer owns the sole binding logic
+        if rp._exchange_name:
+            self.logger.info(f'consumer starts binding: queue [{self._queue_name}] <--> exchange [{rp._exchange_name}] (type: {rp._exchange_type})')
+
+            routing_key_bind = self.consumer_params.broker_exclusive_config.get('routing_key_for_bind')
+            arguments_for_bind = None
+
+            if rp._exchange_type == 'fanout':
+                routing_key_bind = ''  # fanout must use an empty routing_key
+            elif rp._exchange_type == 'headers':
+                routing_key_bind = ''  # headers must use an empty routing_key
+                arguments_for_bind = self.consumer_params.broker_exclusive_config.get('headers_for_bind', {})
+                arguments_for_bind['x-match'] = self.consumer_params.broker_exclusive_config.get('x_match_for_bind', 'all')
+            elif routing_key_bind is None:  # when the user gives no binding key, pick a default based on the exchange type
+                if rp._exchange_type == 'topic':
+                    routing_key_bind = '#'  # topic subscribes to everything by default
+                else:  # direct
+                    routing_key_bind = self._queue_name
+
+            AmqpStormQueue(rp.channel).bind(queue=self._queue_name, exchange=rp._exchange_name,
+                                            routing_key=routing_key_bind, arguments=arguments_for_bind)
+        self._rp = rp
+
+    def _shedual_task(self):
+        # override the parent method to support more complex binding logic
+        def callback(amqpstorm_message: amqpstorm.Message):
+            body = amqpstorm_message.body
+            kw = {'amqpstorm_message': amqpstorm_message, 'body': body}
+            self._submit_task(kw)
+
+        rp = self._rp
+        rp.channel_wrapper_by_ampqstormbaic.qos(self.consumer_params.concurrent_num)
+        rp.channel_wrapper_by_ampqstormbaic.consume(callback=callback, queue=self.queue_name, no_ack=self.consumer_params.broker_exclusive_config['no_ack'])
+        rp.channel.start_consuming(auto_decode=True)
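A usage sketch for this new broker kind, assuming a topic exchange; the exchange name, queue name, and routing key are illustrative. The publisher declares only the exchange, while this consumer declares the queue and performs the single binding:

```python
from funboost import boost, BoosterParams, BrokerEnum

# A sketch: with exchange_type 'topic', routing_key_for_bind may use the
# wildcards * and #; 'order.*' matches e.g. order.created and order.paid.
@boost(BoosterParams(
    queue_name='test_topic_queue',  # hypothetical queue name
    broker_kind=BrokerEnum.RABBITMQ_COMPLEX_ROUTING,
    broker_exclusive_config={
        'exchange_name': 'demo_topic_exchange',  # hypothetical exchange name
        'exchange_type': 'topic',
        'routing_key_for_bind': 'order.*',
    },
))
def on_order_event(body):
    print(body)

if __name__ == '__main__':
    on_order_event.consume()
```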
funboost/consumers/redis_consumer.py CHANGED

@@ -20,7 +20,7 @@ class RedisConsumer(AbstractConsumer, RedisMixin):
     This is the complex version: it pulls 100 messages at a time to reduce round trips with redis; the simple version is in funboost/consumers/redis_consumer_simple.py
     """

-
+

     # noinspection DuplicatedCode
     def _shedual_task(self):

funboost/consumers/redis_consumer_ack_able.py CHANGED

@@ -99,7 +99,7 @@ class RedisConsumerAckAble(ConsumerConfirmMixinWithTheHelpOfRedisByHearbeat, AbstractConsumer):
     # print(script_4(keys=["text_pipelien1","text_pipelien1b"]))
     """

-
+

     def _shedual_task000(self):
         # Could use a lua script, or redis watch together with a pipeline. Compared with pop and zadd on two separate lines, this saves one io round trip and also removes the small chance of losing a task.
funboost/consumers/redis_consumer_ack_using_timeout.py CHANGED

@@ -15,13 +15,9 @@ class RedisConsumerAckUsingTimeout(AbstractConsumer, RedisMixin):
     If a message is not acked within the timeout, it is automatically requeued; e.g. after the message is taken out, a sudden power loss, restart, or other failure means it can never be acked, so past the deadline it is put back into the queue.
     """

-    BROKER_EXCLUSIVE_CONFIG_DEFAULT = {'ack_timeout': 3600}

-
-
-    @boost(BoosterParams(queue_name='test_redis_ack__use_timeout', broker_kind=BrokerEnum.REIDS_ACK_USING_TIMEOUT,
-                         concurrent_num=5, log_level=20, broker_exclusive_config={'ack_timeout': 30}))
-    '''
+
+

     def custom_init(self):
         self._unack_zset_name = f'{self._queue_name}__unack_using_timeout'
funboost/consumers/redis_stream_consumer.py CHANGED

@@ -13,11 +13,9 @@ class RedisStreamConsumer(AbstractConsumer, RedisMixin):
     """
    Implemented with redis's stream structure as the middleware. Requires redis 5.0+; the redis stream structure is redis's message queue, conceptually similar to kafka, and far more capable than the list structure.
     """
-    GROUP = 'funboost_group'
-    BROKER_EXCLUSIVE_CONFIG_DEFAULT = {'group': 'funboost_group','pull_msg_batch_size': 100}

     def custom_init(self):
-        self.group = self.consumer_params.broker_exclusive_config['group']
+        self.group = self.consumer_params.broker_exclusive_config['group']

     def start_consuming_message(self):
         redis_server_info_dict = self.redis_db_frame.info()
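The stream group and batch size stay configurable per queue; the registry below sets 'group' to 'funboost_group' and 'pull_msg_batch_size' to 100 by default. A sketch with illustrative names:

```python
from funboost import boost, BoosterParams, BrokerEnum

# A sketch: 'group' names the redis stream consumer group; 'pull_msg_batch_size'
# presumably bounds how many messages are fetched per read from the stream.
@boost(BoosterParams(
    queue_name='test_redis_stream_queue',  # hypothetical queue name
    broker_kind=BrokerEnum.REDIS_STREAM,
    broker_exclusive_config={'group': 'analytics_group', 'pull_msg_batch_size': 50},
))
def track(event):
    print(event)
```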
funboost/contrib/save_function_result_status/__init__.py ADDED (file without changes)

funboost/contrib/save_result_status_to_sqldb.py → funboost/contrib/save_function_result_status/save_result_status_to_sqldb.py RENAMED

@@ -1,3 +1,11 @@
+
+"""
+A contribution: save function result status to mysql, postgres, etc., since by default mongo is used for saving.
+
+You can specify user_custom_record_process_info_func=save_result_status_to_sqlalchemy inside @boost.
+"""
+
+
 import copy
 import functools
 import json

@@ -7,49 +15,8 @@ from sqlalchemy import create_engine

 from funboost import boost, FunctionResultStatus, funboost_config_deafult

-"""
--- If the user saves to mysql rather than mongodb, the user first creates this table themselves, used to save function consume status and results.
-
-CREATE TABLE funboost_consume_results
-(
-    _id VARCHAR(255),
-    `function` VARCHAR(255),
-    host_name VARCHAR(255),
-    host_process VARCHAR(255),
-    insert_minutes VARCHAR(255),
-    insert_time datetime,
-    insert_time_str VARCHAR(255),
-    msg_dict JSON,
-    params JSON,
-    params_str VARCHAR(255),
-    process_id BIGINT(20),
-    publish_time FLOAT,
-    publish_time_str VARCHAR(255),
-    queue_name VARCHAR(255),
-    result VARCHAR(255),
-    run_times INT,
-    script_name VARCHAR(255),
-    script_name_long VARCHAR(255),
-    success BOOLEAN,
-    task_id VARCHAR(255),
-    thread_id BIGINT(20),
-    time_cost FLOAT,
-    time_end FLOAT,
-    time_start FLOAT,
-    total_thread INT,
-    utime VARCHAR(255),
-    `exception` MEDIUMTEXT ,
-    rpc_result_expire_seconds BIGINT(20),
-    primary key (_id),
-    key idx_insert_time(insert_time),
-    key idx_queue_name_insert_time(queue_name,insert_time),
-    key idx_params_str(params_str)
-)
-


-"""
-

 def _gen_insert_sql_and_values_by_dict(dictx: dict):
     key_list = [f'`{k}`' for k in dictx.keys()]
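The new module docstring suggests wiring the saver through user_custom_record_process_info_func. A sketch, assuming the exported hook is named save_result_status_to_sqlalchemy as that docstring says, with an illustrative queue name:

```python
from funboost import boost, BoosterParams, BrokerEnum
from funboost.contrib.save_function_result_status.save_result_status_to_sqldb import save_result_status_to_sqlalchemy

# A sketch: each finished task's FunctionResultStatus is handed to the hook,
# which writes it to the funboost_consume_results table instead of mongo.
@boost(BoosterParams(
    queue_name='test_sql_result_queue',  # hypothetical queue name
    broker_kind=BrokerEnum.REDIS,
    user_custom_record_process_info_func=save_result_status_to_sqlalchemy,
))
def add(x, y):
    return x + y
```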
funboost/contrib/save_function_result_status/save_result_status_use_dataset.py ADDED

@@ -0,0 +1,47 @@
+
+"""
+A contribution: save function result status to mysql, postgres, etc., since by default mongo is used for saving.
+
+You can specify user_custom_record_process_info_func=save_result_status_to_sqlalchemy inside @boost.
+"""
+
+import os
+import copy
+import functools
+import json
+import threading
+
+import dataset
+
+from funboost import boost, FunctionResultStatus, funboost_config_deafult,AbstractConsumer
+
+
+
+pid__db_map = {}
+_lock = threading.Lock()
+def get_db(connect_url) -> dataset.Database:
+    """Wraps connection lookup in a function that keys by pid"""
+    pid = os.getpid()
+    key = (pid, connect_url,)
+    if key not in pid__db_map:
+        with _lock:
+            if key not in pid__db_map:
+                pid__db_map[key] = dataset.connect(connect_url)
+    return pid__db_map[key]
+
+
+connect_url = 'mysql+pymysql://root:123456@127.0.0.1:3306/testdb7'  # a dataset / SQLAlchemy style connection url
+
+# Option 1: use the function hook user_custom_record_process_info_func in the @boost decorator
+def save_result_status_use_dataset(result_status: FunctionResultStatus):
+    db = get_db(connect_url)
+    table = db['funboost_consume_results']
+    table.upsert(result_status.get_status_dict(), ['_id'])
+
+# Option 2: use consumer_override_cls in the decorator and override user_custom_record_process_info_func
+class ResultStatusUseDatasetMixin(AbstractConsumer):
+    def user_custom_record_process_info_func(self, current_function_result_status: FunctionResultStatus):
+        # print(current_function_result_status.get_status_dict())
+        db = get_db(connect_url)
+        table = db['funboost_consume_results']
+        table.upsert(current_function_result_status.get_status_dict(), ['_id'])
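Both wiring options from this module, as a sketch; queue names are illustrative, and consumer_override_cls is the hook the module's own comment names:

```python
from funboost import boost, BoosterParams, BrokerEnum
from funboost.contrib.save_function_result_status.save_result_status_use_dataset import (
    save_result_status_use_dataset, ResultStatusUseDatasetMixin)

# Option 1 sketch: pass the function hook directly.
@boost(BoosterParams(queue_name='test_dataset_hook_queue', broker_kind=BrokerEnum.REDIS,
                     user_custom_record_process_info_func=save_result_status_use_dataset))
def f1(x):
    return x * 2

# Option 2 sketch: override the consumer class, per the module's own comment.
@boost(BoosterParams(queue_name='test_dataset_cls_queue', broker_kind=BrokerEnum.REDIS,
                     consumer_override_cls=ResultStatusUseDatasetMixin))
def f2(x):
    return x * 3
```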
funboost/core/booster.py CHANGED

@@ -97,7 +97,13 @@ class Booster:
         return types.MethodType(self, instance)

     def __call__(self, *args, **kwargs) -> Booster:
-
+        """
+        # The first call to __call__ decorates the function and returns a Booster object; from then on, the consuming function is a Booster instance.
+        # How does a Booster object still support running the original function directly? It falls into the else branch, where self.consuming_function itself runs the arguments.
+        # This is very elegant:
+        # if the user later decides not to use funboost's distributed function scheduling, running the function directly works exactly as before; the user never has to remove the @boost decorator.
+        """
+
         if len(kwargs) == 0 and len(args) == 1 and isinstance(args[0], typing.Callable) and not isinstance(args[0], Booster):
             consuming_function = args[0]
             self.boost_params.consuming_function = consuming_function
             self.boost_params.consuming_function_raw = consuming_function
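The behavior the new docstring describes, as a sketch with an illustrative queue name: the decorated name is a Booster, yet calling it with ordinary arguments still runs the original function directly:

```python
from funboost import boost, BoosterParams, BrokerEnum

@boost(BoosterParams(queue_name='test_direct_call_queue', broker_kind=BrokerEnum.REDIS))
def add(x, y):
    return x + y

print(type(add))   # a Booster object, not a plain function
print(add(1, 2))   # falls into the else branch: runs add directly, prints 3
add.push(1, 2)     # the distributed path: publish a message instead
```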
funboost/core/broker_kind__exclusive_config_default_define.py ADDED

@@ -0,0 +1,229 @@
+# from __future__ import annotations
+# import typing
+# if typing.TYPE_CHECKING:
+#     from logging import Logger
+
+from logging import Logger
+from funboost.constant import BrokerEnum
+
+
+broker_kind__exclusive_config_default_map: dict = {}
+
+
+def register_broker_exclusive_config_default(
+    broker_kind: str, broker_exclusive_config_default: dict
+):
+    broker_kind__exclusive_config_default_map[broker_kind] = broker_exclusive_config_default
+
+
+
+def generate_broker_exclusive_config(
+    broker_kind: str,
+    user_broker_exclusive_config: dict,
+    logger: Logger,
+):
+    broker_exclusive_config_default = broker_kind__exclusive_config_default_map.get(
+        broker_kind, {}
+    )
+    broker_exclusive_config_keys = broker_exclusive_config_default.keys()
+    if user_broker_exclusive_config:
+        if set(user_broker_exclusive_config).issubset(broker_exclusive_config_keys):
+            logger.info(
+                f"The current message queue middleware supports these exclusive config keys: {broker_exclusive_config_default.keys()}"
+            )
+        else:
+            logger.warning(f"""The current message queue middleware received unsupported exclusive config keys {user_broker_exclusive_config.keys()};
+            the supported exclusive config keys are {broker_exclusive_config_keys}""")
+    broker_exclusive_config_merge = dict()
+    broker_exclusive_config_merge.update(broker_exclusive_config_default)
+    broker_exclusive_config_merge.update(user_broker_exclusive_config)
+    return broker_exclusive_config_merge
+
+
+# The full set of celery config options: https://docs.celeryq.dev/en/stable/userguide/configuration.html#new-lowercase-settings
+# All celery @app.task() options can be found in D:\ProgramData\Miniconda3\Lib\site-packages\celery\app\task.py
+register_broker_exclusive_config_default(BrokerEnum.CELERY, {"celery_task_config": {}})
+
+
+# The values of dramatiq_actor_options can be:
+# {'max_age', 'throws', 'pipe_target', 'pipe_ignore', 'on_success', 'retry_when', 'time_limit', 'min_backoff', 'max_retries', 'max_backoff', 'notify_shutdown', 'on_failure'}
+register_broker_exclusive_config_default(
+    BrokerEnum.DRAMATIQ, {"dramatiq_actor_options": {}}
+)
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.GRPC,
+    {
+        "host": "127.0.0.1",
+        "port": None,
+    },
+)
+
+register_broker_exclusive_config_default(
+    BrokerEnum.HTTP,
+    {
+        "host": "127.0.0.1",
+        "port": None,
+    },
+)
+
+
+"""
+retries=0, retry_delay=0, priority=None, context=False,
+name=None, expires=None, **kwargs
+"""
+register_broker_exclusive_config_default(BrokerEnum.HUEY, {"huey_task_kwargs": {}})
+
+
+"""
+auto_offset_reset explained:
+
+auto_offset_reset (str): A policy for resetting offsets on
+    OffsetOutOfRange errors: 'earliest' will move to the oldest
+    available message, 'latest' will move to the most recent. Any
+    other value will raise the exception. Default: 'latest'.
+"""
+register_broker_exclusive_config_default(
+    BrokerEnum.KAFKA,
+    {
+        "group_id": "funboost_kafka",
+        "auto_offset_reset": "earliest",
+        "num_partitions": 10,
+        "replication_factor": 1,
+    },
+)
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.KAFKA_CONFLUENT,
+    {
+        "group_id": "funboost_kafka",
+        "auto_offset_reset": "earliest",
+        "num_partitions": 10,
+        "replication_factor": 1,
+    },
+)
+
+
+"""
+# prefetch_count is the number of messages to prefetch
+transport_options are kombu's transport_options.
+For example, when kombu uses redis as the middleware, visibility_timeout decides how long a message may stay un-acked before it is automatically requeued.
+Which transport_options each broker supports can be found in the transport_options notes in kombu's source code.
+"""
+register_broker_exclusive_config_default(
+    BrokerEnum.KOMBU,
+    {
+        "kombu_url": None,  # if kombu_url is also configured here, it takes priority; otherwise funboost_config.KOMBU_URL is used
+        "transport_options": {},  # transport_options are kombu's transport_options.
+        "prefetch_count": 500,
+    },
+)
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.MYSQL_CDC, {"BinLogStreamReaderConfig": {}}
+)  # the value holds the constructor arguments of BinLogStreamReader
+
+
+"""
+consumer_type Members:
+Exclusive Shared Failover KeyShared
+"""
+register_broker_exclusive_config_default(
+    BrokerEnum.PULSAR,
+    {
+        "subscription_name": "funboost_group",
+        "replicate_subscription_state_enabled": True,
+        "consumer_type": "Shared",
+    },
+)
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.RABBITMQ_AMQPSTORM,
+    {
+        "queue_durable": True,
+        "x-max-priority": None,  # x-max-priority is rabbitmq's priority queue setting; must be an integer, strongly recommended below 5. None means the queue does not support priorities.
+        "no_ack": False,
+    },
+)
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.RABBITMQ_COMPLEX_ROUTING,
+    {
+        "queue_durable": True,
+        "x-max-priority": None,  # x-max-priority is rabbitmq's priority queue setting; must be an integer, strongly recommended below 5. None means the queue does not support priorities.
+        "no_ack": False,
+        "exchange_name": "",
+        "exchange_type": "direct",
+        "routing_key_for_bind": None,  # the key used when binding the exchange and queue. None means queue_name is used as the binding key; "" (empty string) also means queue_name. Ignored for fanout and headers exchanges. Topic exchanges may use the wildcards * and #.
+        "routing_key_for_publish": None,
+        # for headers exchange
+        "headers_for_bind": {},
+        "x_match_for_bind": "all",  # all or any
+        "exchange_declare_durable": True,
+    },
+)
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.REDIS,
+    {
+        "redis_bulk_push": 1,
+        "pull_msg_batch_size": 100,
+    },
+)  # redis_bulk_push: whether to bulk-push to redis
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.REDIS_ACK_ABLE, {"pull_msg_batch_size": 100}
+)
+
+# RedisConsumerAckUsingTimeout's ack_timeout means: if a message is taken out and not acked within this many seconds, it is automatically requeued. This must be larger than the function's run time, otherwise messages requeue endlessly.
+"""
+Usage: to set ack_timeout, pass it via broker_exclusive_config, which overrides the 3600 here; users never need to edit the BROKER_EXCLUSIVE_CONFIG_DEFAULT source.
+@boost(BoosterParams(queue_name='test_redis_ack__use_timeout', broker_kind=BrokerEnum.REIDS_ACK_USING_TIMEOUT,
+                     concurrent_num=5, log_level=20, broker_exclusive_config={'ack_timeout': 30}))
+"""
+register_broker_exclusive_config_default(
+    BrokerEnum.REIDS_ACK_USING_TIMEOUT, {"ack_timeout": 3600}
+)
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.REDIS_PRIORITY, {"x-max-priority": None}
+)  # x-max-priority is rabbitmq's priority queue setting; must be an integer, strongly recommended below 5. None means the queue does not support priorities.
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.REDIS_STREAM,
+    {
+        "group": "funboost_group",
+        "pull_msg_batch_size": 100,
+    },
+)
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.TCP,
+    {
+        "host": "127.0.0.1",
+        "port": None,
+        "bufsize": 10240,
+    },
+)
+
+
+register_broker_exclusive_config_default(
+    BrokerEnum.UDP,
+    {
+        "host": "127.0.0.1",
+        "port": None,
+        "bufsize": 10240,
+    },
+)
+
+register_broker_exclusive_config_default(BrokerEnum.ZEROMQ, {"port": None})
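How the new registry is meant to be used, as a sketch: a broker kind registers its defaults once, and generate_broker_exclusive_config merges user overrides on top, warning on unknown keys. The broker kind string here is hypothetical:

```python
import logging

from funboost.core.broker_kind__exclusive_config_default_define import (
    register_broker_exclusive_config_default, generate_broker_exclusive_config)

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('demo')

# Register defaults once for a hypothetical custom broker kind...
register_broker_exclusive_config_default('MY_CUSTOM_BROKER', {'host': '127.0.0.1', 'port': 9000})

# ...then merge user config over the defaults; a key not present in the
# defaults would take the warning branch instead of the info branch.
merged = generate_broker_exclusive_config('MY_CUSTOM_BROKER', {'port': 9001}, logger)
print(merged)  # {'host': '127.0.0.1', 'port': 9001}
```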
|