funboost 50.2__py3-none-any.whl → 50.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of funboost might be problematic.

Files changed (47)
  1. funboost/__init__.py +1 -1
  2. funboost/constant.py +4 -0
  3. funboost/consumers/base_consumer.py +95 -96
  4. funboost/consumers/celery_consumer.py +1 -1
  5. funboost/consumers/dramatiq_consumer.py +0 -5
  6. funboost/consumers/grpc_consumer.py +2 -19
  7. funboost/consumers/http_consumer.py +107 -40
  8. funboost/consumers/http_consumer_aiohttp_old.py +113 -0
  9. funboost/consumers/huey_consumer.py +2 -5
  10. funboost/consumers/kafka_consumer.py +1 -6
  11. funboost/consumers/kafka_consumer_manually_commit.py +0 -1
  12. funboost/consumers/kombu_consumer.py +0 -39
  13. funboost/consumers/mysql_cdc_consumer.py +1 -3
  14. funboost/consumers/pulsar_consumer.py +10 -5
  15. funboost/consumers/rabbitmq_amqpstorm_consumer.py +7 -8
  16. funboost/consumers/rabbitmq_complex_routing_consumer.py +54 -0
  17. funboost/consumers/redis_consumer.py +1 -1
  18. funboost/consumers/redis_consumer_ack_able.py +1 -1
  19. funboost/consumers/redis_consumer_ack_using_timeout.py +2 -6
  20. funboost/consumers/redis_consumer_priority.py +1 -1
  21. funboost/consumers/redis_stream_consumer.py +1 -3
  22. funboost/consumers/tcp_consumer.py +1 -1
  23. funboost/consumers/udp_consumer.py +1 -1
  24. funboost/consumers/zeromq_consumer.py +1 -1
  25. funboost/contrib/save_function_result_status/__init__.py +0 -0
  26. funboost/contrib/{save_result_status_to_sqldb.py → save_function_result_status/save_result_status_to_sqldb.py} +8 -41
  27. funboost/contrib/save_function_result_status/save_result_status_use_dataset.py +47 -0
  28. funboost/core/booster.py +38 -3
  29. funboost/core/broker_kind__exclusive_config_default_define.py +229 -0
  30. funboost/core/funboost_time.py +10 -45
  31. funboost/core/func_params_model.py +28 -4
  32. funboost/core/helper_funs.py +9 -8
  33. funboost/core/msg_result_getter.py +27 -0
  34. funboost/factories/broker_kind__publsiher_consumer_type_map.py +13 -3
  35. funboost/funboost_config_deafult.py +0 -3
  36. funboost/function_result_web/templates/fun_result_table.html +1 -1
  37. funboost/publishers/base_publisher.py +8 -2
  38. funboost/publishers/http_publisher.py +20 -2
  39. funboost/publishers/rabbitmq_amqpstorm_publisher.py +8 -7
  40. funboost/publishers/rabbitmq_complex_routing_publisher.py +84 -0
  41. funboost/utils/redis_manager.py +11 -5
  42. {funboost-50.2.dist-info → funboost-50.4.dist-info}/METADATA +159 -98
  43. {funboost-50.2.dist-info → funboost-50.4.dist-info}/RECORD +46 -41
  44. {funboost-50.2.dist-info → funboost-50.4.dist-info}/WHEEL +1 -1
  45. funboost-50.2.dist-info/LICENSE +0 -203
  46. {funboost-50.2.dist-info → funboost-50.4.dist-info}/entry_points.txt +0 -0
  47. {funboost-50.2.dist-info → funboost-50.4.dist-info}/top_level.txt +0 -0
funboost/contrib/{save_result_status_to_sqldb.py → save_function_result_status/save_result_status_to_sqldb.py} RENAMED
@@ -1,3 +1,11 @@
+
+ """
+ A contribution: save the function result status to MySQL, PostgreSQL, etc., because MongoDB is used for saving by default.
+
+ You can specify user_custom_record_process_info_func=save_result_status_to_sqlalchemy in @boost.
+ """
+
+
  import copy
  import functools
  import json
@@ -7,49 +15,8 @@ from sqlalchemy import create_engine
 
  from funboost import boost, FunctionResultStatus, funboost_config_deafult
 
- """
- -- If the user saves results to MySQL instead of MongoDB, the user creates this table themselves, to store the function consume status and result.
-
- CREATE TABLE funboost_consume_results
- (
- _id VARCHAR(255),
- `function` VARCHAR(255),
- host_name VARCHAR(255),
- host_process VARCHAR(255),
- insert_minutes VARCHAR(255),
- insert_time datetime,
- insert_time_str VARCHAR(255),
- msg_dict JSON,
- params JSON,
- params_str VARCHAR(255),
- process_id BIGINT(20),
- publish_time FLOAT,
- publish_time_str VARCHAR(255),
- queue_name VARCHAR(255),
- result VARCHAR(255),
- run_times INT,
- script_name VARCHAR(255),
- script_name_long VARCHAR(255),
- success BOOLEAN,
- task_id VARCHAR(255),
- thread_id BIGINT(20),
- time_cost FLOAT,
- time_end FLOAT,
- time_start FLOAT,
- total_thread INT,
- utime VARCHAR(255),
- `exception` MEDIUMTEXT ,
- rpc_result_expire_seconds BIGINT(20),
- primary key (_id),
- key idx_insert_time(insert_time),
- key idx_queue_name_insert_time(queue_name,insert_time),
- key idx_params_str(params_str)
- )
-
 
 
- """
-
 
  def _gen_insert_sql_and_values_by_dict(dictx: dict):
      key_list = [f'`{k}`' for k in dictx.keys()]
funboost/contrib/save_function_result_status/save_result_status_use_dataset.py ADDED
@@ -0,0 +1,47 @@
+
+ """
+ A contribution: save the function result status to MySQL, PostgreSQL, etc., because MongoDB is used for saving by default.
+
+ You can specify user_custom_record_process_info_func=save_result_status_to_sqlalchemy in @boost.
+ """
+
+ import os
+ import copy
+ import functools
+ import json
+ import threading
+
+ import dataset
+
+ from funboost import boost, FunctionResultStatus, funboost_config_deafult, AbstractConsumer
+
+
+
+ pid__db_map = {}
+ _lock = threading.Lock()
+ def get_db(connect_url) -> dataset.Database:
+     """Wrap a helper that checks the pid, so each process gets its own connection."""
+     pid = os.getpid()
+     key = (pid, connect_url,)
+     if key not in pid__db_map:
+         with _lock:
+             if key not in pid__db_map:
+                 pid__db_map[key] = dataset.connect(connect_url)
+     return pid__db_map[key]
+
+
+ connect_url = 'mysql+pymysql://root:123456@127.0.0.1:3306/testdb7'  # dataset / SQLAlchemy style connection URL
+
+ # Option 1: use the user_custom_record_process_info_func hook in the @boost decorator
+ def save_result_status_use_dataset(result_status: FunctionResultStatus):
+     db = get_db(connect_url)
+     table = db['funboost_consume_results']
+     table.upsert(result_status.get_status_dict(), ['_id'])
+
+ # Option 2: use consumer_override_cls in the decorator and override user_custom_record_process_info_func
+ class ResultStatusUseDatasetMixin(AbstractConsumer):
+     def user_custom_record_process_info_func(self, current_function_result_status: FunctionResultStatus):
+         # print(current_function_result_status.get_status_dict())
+         db = get_db(connect_url)
+         table = db['funboost_consume_results']
+         table.upsert(current_function_result_status.get_status_dict(), ['_id'])
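A minimal usage sketch of the option-1 hook (the queue name and broker are hypothetical; the hook is assumed to be accepted as a @boost / BoosterParams field, as the module docstring above describes):

    from funboost import boost, BoosterParams, BrokerEnum
    from funboost.contrib.save_function_result_status.save_result_status_use_dataset import save_result_status_use_dataset

    @boost(BoosterParams(
        queue_name='demo_dataset_result_queue',                                # hypothetical queue name
        broker_kind=BrokerEnum.REDIS,                                          # any supported broker
        user_custom_record_process_info_func=save_result_status_use_dataset,   # option 1 hook
    ))
    def add(a, b):
        return a + b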
funboost/core/booster.py CHANGED
@@ -1,6 +1,7 @@
  from __future__ import annotations
  import copy
  import inspect
+ from multiprocessing import Process
  import os
  import sys
  import types
@@ -96,7 +97,13 @@ class Booster:
          return types.MethodType(self, instance)
 
      def __call__(self, *args, **kwargs) -> Booster:
-         if len(kwargs) == 0 and len(args) == 1 and isinstance(args[0], typing.Callable):
+         """
+         # The first call of __call__ decorates the function and returns a Booster object; from then on the consuming function becomes a Booster instance.
+         # How does a Booster object still support running the original function directly? It has to fall into the else branch and run the arguments with self.consuming_function itself.
+         # This is quite a neat trick.
+         # If the user later decides not to use funboost's distributed function scheduling, calling the function directly works exactly as before; the user does not need to remove the @boost decorator to run the function itself.
+         """
+         if len(kwargs) == 0 and len(args) == 1 and isinstance(args[0], typing.Callable) and not isinstance(args[0], Booster):
              consuming_function = args[0]
              self.boost_params.consuming_function = consuming_function
              self.boost_params.consuming_function_raw = consuming_function
@@ -242,10 +249,10 @@
      """
 
      # the pid_queue_name__booster_map dict stores {(process id, queue_name): Booster object}
-     pid_queue_name__booster_map = {} # type: typing.Dict[typing.Tuple[int,str],Booster]
+     pid_queue_name__booster_map :typing.Dict[typing.Tuple[int,str],Booster]= {}
 
      # the queue_name__boost_params_consuming_function_map dict stores {queue_name: (@boost params dict, consuming function decorated by @boost)}
-     queue_name__boost_params_map = {} # type: typing.Dict[str,BoosterParams]
+     queue_name__boost_params_map :typing.Dict[str,BoosterParams]= {}
 
      pid_queue_name__has_start_consume_set = set()
 
@@ -392,6 +399,34 @@
 
      m_consume = multi_process_consume_queues
 
+     @classmethod
+     def consume_group(cls, booster_group:str,block=False):
+         """
+         Start multiple consuming functions according to the booster_group consume-group name set in the @boost decorator.
+         """
+         if booster_group is None:
+             raise ValueError('booster_group 不能为None')
+         need_consume_queue_names = []
+         for queue_name in cls.get_all_queues():
+             booster= cls.get_or_create_booster_by_queue_name(queue_name)
+             if booster.boost_params.booster_group == booster_group:
+                 need_consume_queue_names.append(queue_name)
+         flogger.info(f'according to booster_group:{booster_group} ,start consume queues: {need_consume_queue_names}')
+         for queue_name in need_consume_queue_names:
+             cls.get_or_create_booster_by_queue_name(queue_name).consume()
+         if block:
+             ctrl_c_recv()
+
+     @classmethod
+     def multi_process_consume_group(cls, booster_group:str, process_num=1):
+         """
+         Start the consuming functions of a booster_group consume group in multiple processes.
+         """
+         for _ in range(process_num):
+             Process(target=cls.consume_group,args=(booster_group,True)).start()
+
+     m_consume_group = multi_process_consume_group
+
      @classmethod
      def multi_process_consume_all_queues(cls, process_num=1):
          """
funboost/core/broker_kind__exclusive_config_default_define.py ADDED
@@ -0,0 +1,229 @@
+ # from __future__ import annotations
+ # import typing
+ # if typing.TYPE_CHECKING:
+ #     from logging import Logger
+
+ from logging import Logger
+ from funboost.constant import BrokerEnum
+
+
+ broker_kind__exclusive_config_default_map: dict = {}
+
+
+ def register_broker_exclusive_config_default(
+     broker_kind: str, broker_exclusive_config_default: dict
+ ):
+     broker_kind__exclusive_config_default_map[broker_kind] = broker_exclusive_config_default
+
+
+
+ def generate_broker_exclusive_config(
+     broker_kind: str,
+     user_broker_exclusive_config: dict,
+     logger: Logger,
+ ):
+     broker_exclusive_config_default = broker_kind__exclusive_config_default_map.get(
+         broker_kind, {}
+     )
+     broker_exclusive_config_keys = broker_exclusive_config_default.keys()
+     if user_broker_exclusive_config:
+         if set(user_broker_exclusive_config).issubset(broker_exclusive_config_keys):
+             logger.info(
+                 f"当前消息队列中间件能支持特殊独有配置 {broker_exclusive_config_default.keys()}"
+             )
+         else:
+             logger.warning(f"""当前消息队列中间件含有不支持的特殊配置 {user_broker_exclusive_config.keys()} ,
+                 能支持的特殊独有配置包括 {broker_exclusive_config_keys}""")
+     broker_exclusive_config_merge = dict()
+     broker_exclusive_config_merge.update(broker_exclusive_config_default)
+     broker_exclusive_config_merge.update(user_broker_exclusive_config)
+     return broker_exclusive_config_merge
+
+
+ # The full list of celery configuration options: https://docs.celeryq.dev/en/stable/userguide/configuration.html#new-lowercase-settings
+ # All options that celery @app.task() can take are in celery/app/task.py (e.g. D:\ProgramData\Miniconda3\Lib\site-packages\celery\app\task.py)
+ register_broker_exclusive_config_default(BrokerEnum.CELERY, {"celery_task_config": {}})
+
+
+ # dramatiq_actor_options may contain:
+ # {'max_age', 'throws', 'pipe_target', 'pipe_ignore', 'on_success', 'retry_when', 'time_limit', 'min_backoff', 'max_retries', 'max_backoff', 'notify_shutdown', 'on_failure'}
+ register_broker_exclusive_config_default(
+     BrokerEnum.DRAMATIQ, {"dramatiq_actor_options": {}}
+ )
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.GRPC,
+     {
+         "host": "127.0.0.1",
+         "port": None,
+     },
+ )
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.HTTP,
+     {
+         "host": "127.0.0.1",
+         "port": None,
+     },
+ )
+
+
+ """
+ retries=0, retry_delay=0, priority=None, context=False,
+ name=None, expires=None, **kwargs
+ """
+ register_broker_exclusive_config_default(BrokerEnum.HUEY, {"huey_task_kwargs": {}})
+
+
+ """
+ About auto_offset_reset:
+
+ auto_offset_reset (str): A policy for resetting offsets on
+     OffsetOutOfRange errors: 'earliest' will move to the oldest
+     available message, 'latest' will move to the most recent. Any
+     other value will raise the exception. Default: 'latest'.
+ """
+ register_broker_exclusive_config_default(
+     BrokerEnum.KAFKA,
+     {
+         "group_id": "funboost_kafka",
+         "auto_offset_reset": "earliest",
+         "num_partitions": 10,
+         "replication_factor": 1,
+     },
+ )
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.KAFKA_CONFLUENT,
+     {
+         "group_id": "funboost_kafka",
+         "auto_offset_reset": "earliest",
+         "num_partitions": 10,
+         "replication_factor": 1,
+     },
+ )
+
+
+ """
+ # prefetch_count is the number of messages to prefetch.
+ transport_options is kombu's transport_options.
+ For example, when kombu uses redis as the broker, visibility_timeout decides how long a message may stay un-acked after being taken out before it is automatically requeued.
+ What each broker can set in transport_options is described in the transport_options parameter notes in the kombu source code.
+ """
+ register_broker_exclusive_config_default(
+     BrokerEnum.KOMBU,
+     {
+         "kombu_url": None,  # if kombu_url is also configured here, it takes priority; otherwise funboost_config.KOMBU_URL is used
+         "transport_options": {},  # transport_options is kombu's transport_options
+         "prefetch_count": 500,
+     },
+ )
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.MYSQL_CDC, {"BinLogStreamReaderConfig": {}}
+ )  # the value holds the constructor arguments of BinLogStreamReader
+
+
+ """
+ consumer_type Members:
+     Exclusive Shared Failover KeyShared
+ """
+ register_broker_exclusive_config_default(
+     BrokerEnum.PULSAR,
+     {
+         "subscription_name": "funboost_group",
+         "replicate_subscription_state_enabled": True,
+         "consumer_type": "Shared",
+     },
+ )
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.RABBITMQ_AMQPSTORM,
+     {
+         "queue_durable": True,
+         "x-max-priority": None,  # x-max-priority is rabbitmq's priority-queue setting; it must be an integer, strongly recommended to be less than 5. None means the queue does not support priority.
+         "no_ack": False,
+     },
+ )
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.RABBITMQ_COMPLEX_ROUTING,
+     {
+         "queue_durable": True,
+         "x-max-priority": None,  # x-max-priority is rabbitmq's priority-queue setting; it must be an integer, strongly recommended to be less than 5. None means the queue does not support priority.
+         "no_ack": False,
+         "exchange_name": "",
+         "exchange_type": "direct",
+         "routing_key_for_bind": None,  # the key used when binding the exchange and the queue. None means queue_name is used as the binding key; "" (empty string) also means queue_name is used. For fanout and headers exchanges this value is ignored. For topic exchanges the wildcards * and # can be used.
+         "routing_key_for_publish": None,
+         # for headers exchange
+         "headers_for_bind": {},
+         "x_match_for_bind": "all",  # all or any
+         "exchange_declare_durable": True,
+     },
+ )
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.REDIS,
+     {
+         "redis_bulk_push": 1,
+         "pull_msg_batch_size": 100,
+     },
+ )  # redis_bulk_push: whether redis pushes messages in bulk
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.REDIS_ACK_ABLE, {"pull_msg_batch_size": 100}
+ )
+
+ # The ack_timeout of RedisConsumerAckUsingTimeout means: if a message has not been acked this many seconds after being taken out, it is automatically requeued. It must be larger than the function's run time, otherwise messages are requeued endlessly.
+ """
+ Usage: to set ack_timeout, pass it via broker_exclusive_config; it overrides the 3600 here, so the user does not need to change the BROKER_EXCLUSIVE_CONFIG_DEFAULT source.
+ @boost(BoosterParams(queue_name='test_redis_ack__use_timeout', broker_kind=BrokerEnum.REIDS_ACK_USING_TIMEOUT,
+                      concurrent_num=5, log_level=20, broker_exclusive_config={'ack_timeout': 30}))
+ """
+ register_broker_exclusive_config_default(
+     BrokerEnum.REIDS_ACK_USING_TIMEOUT, {"ack_timeout": 3600}
+ )
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.REDIS_PRIORITY, {"x-max-priority": None}
+ )  # x-max-priority mirrors rabbitmq's priority-queue setting; it must be an integer, strongly recommended to be less than 5. None means the queue does not support priority.
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.REDIS_STREAM,
+     {
+         "group": "funboost_group",
+         "pull_msg_batch_size": 100,
+     },
+ )
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.TCP,
+     {
+         "host": "127.0.0.1",
+         "port": None,
+         "bufsize": 10240,
+     },
+ )
+
+
+ register_broker_exclusive_config_default(
+     BrokerEnum.UDP,
+     {
+         "host": "127.0.0.1",
+         "port": None,
+         "bufsize": 10240,
+     },
+ )
+
+ register_broker_exclusive_config_default(BrokerEnum.ZEROMQ, {"port": None})
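A small illustration of how the new helper merges the registered defaults with a user-supplied broker_exclusive_config (the override value and logger name are hypothetical):

    import logging
    from funboost.constant import BrokerEnum
    from funboost.core.broker_kind__exclusive_config_default_define import generate_broker_exclusive_config

    merged = generate_broker_exclusive_config(
        broker_kind=BrokerEnum.RABBITMQ_AMQPSTORM,
        user_broker_exclusive_config={'x-max-priority': 3},   # overrides one supported key
        logger=logging.getLogger('demo'),
    )
    # merged == {'queue_durable': True, 'x-max-priority': 3, 'no_ack': False}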
funboost/core/funboost_time.py CHANGED
@@ -4,19 +4,20 @@ import sys
  import datetime
 
  import typing
-
- from nb_time import NbTime
+ import threading
+ from nb_time import NbTime,NowTimeStrCache
  from funboost.funboost_config_deafult import FunboostCommonConfig
 
+
  class FunboostTime(NbTime):
      default_formatter = NbTime.FORMATTER_DATETIME_NO_ZONE
 
-     def get_time_zone_str(self,time_zone: typing.Union[str, datetime.tzinfo,None] = None):
-         return time_zone or self.default_time_zone or FunboostCommonConfig.TIMEZONE or self.get_localzone_name()
+     def get_time_zone_str(self, time_zone: typing.Union[str, datetime.tzinfo, None] = None):
+         return time_zone or self.default_time_zone or FunboostCommonConfig.TIMEZONE or self.get_localzone_name()
 
      @staticmethod
-     def _get_tow_digist(num:int)->str:
-         if len(str(num)) ==1:
+     def _get_tow_digist(num: int) -> str:
+         if len(str(num)) == 1:
              return f'0{num}'
          return str(num)
 
@@ -28,49 +29,13 @@ class FunboostTime(NbTime):
          return t_str
 
 
+ def fast_get_now_time_str() -> str:
+     return NowTimeStrCache.fast_get_now_time_str(FunboostCommonConfig.TIMEZONE)
 
 
- # Cache timezone objects to improve performance (avoid re-parsing them)
- _tz_cache = {}
-
- def get_now_time_str_by_tz(tz_name: str=None) -> str:
-     """
-     Return the current time string for a timezone name (e.g. 'Asia/Shanghai'), formatted as '%Y-%m-%d %H:%M:%S'
-
-     Compatible with Python 3.6+; prefers zoneinfo (3.9+), otherwise falls back to pytz
-
-     :param tz_name: IANA timezone name, e.g. 'Asia/Shanghai', 'America/New_York'
-     :return: formatted time string
-     """
-     # check the cache
-     tz_name = tz_name or FunboostCommonConfig.TIMEZONE
-     if tz_name not in _tz_cache:
-         if sys.version_info >= (3, 9):
-             from zoneinfo import ZoneInfo
-             _tz_cache[tz_name] = ZoneInfo(tz_name)
-         else:
-             # Python < 3.9: use pytz
-             try:
-                 import pytz
-                 _tz_cache[tz_name] = pytz.timezone(tz_name)
-             except ImportError:
-                 raise RuntimeError(
-                     f"Python < 3.9 requires 'pytz' to handle timezones. "
-                     f"Install it with: pip install pytz"
-                 ) from None
-             except pytz.UnknownTimeZoneError:
-                 raise pytz.UnknownTimeZoneError(tz_name)
-
-     tz = _tz_cache[tz_name]
-
-     # Get the current time and format it (note: datetime.now(tz) is the most efficient way)
-     now = datetime.datetime.now(tz)
-     return f'{now.year:04d}-{now.month:02d}-{now.day:02d} {now.hour:02d}:{now.minute:02d}:{now.second:02d}'
-     # return now.strftime("%Y-%m-%d %H:%M:%S")
-
  if __name__ == '__main__':
      print(FunboostTime().get_str())
-     tz=pytz.timezone(FunboostCommonConfig.TIMEZONE)
+     tz = pytz.timezone(FunboostCommonConfig.TIMEZONE)
      for i in range(1000000):
          pass
          # FunboostTime()#.get_str_fast()
funboost/core/func_params_model.py CHANGED
@@ -201,15 +201,26 @@ class BoosterParams(BaseJsonAbleModel):
      schedule_tasks_on_main_thread: bool = False  # schedule tasks directly on the main thread, which means two consumers cannot be started in the current main thread at the same time.
 
      is_auto_start_consuming_message: bool = False  # whether consuming starts automatically right after definition, so the user does not need to call .consume() to start consuming messages.
+
+     # booster_group: the consume-group name. BoostersManager.consume_group starts multiple consuming functions by booster_group, reducing the need to write f1.consume() f2.consume() ... one by one.
+     # Unlike BoostersManager.consume_all(), which starts all unrelated consuming functions, and unlike f1.consume() f2.consume(), which starts consumers individually.
+     # Different groups can be created according to business logic, giving a flexible consume-startup strategy.
+     # For usage see documentation section 4.2d.3: using BoostersManager to start a group of consuming functions via consume_group.
+     booster_group:typing.Union[str, None] = None
 
      consuming_function: typing.Optional[typing.Callable] = None  # the consuming function; it does not need to be specified in @boost because the decorator knows the function below it.
      consuming_function_raw: typing.Optional[typing.Callable] = None  # no need to pass; auto-generated
      consuming_function_name: str = ''  # no need to pass; auto-generated
 
 
+     """
+     # A place for configuration that is not common across middleware kinds, settings unique to each middleware; not every setting is compatible with every middleware, because the framework supports 30 kinds of message queues, and a message queue is more than the simple FIFO queue concept.
+     # For example kafka supports consumer groups, rabbitmq supports unique concepts such as various ack mechanisms and complex routing, some middleware natively supports message priority and some does not; every message queue has configuration parameters with their own meaning, and they can be passed here.
+     # The key-value pairs each middleware accepts can be seen in the BROKER_EXCLUSIVE_CONFIG_DEFAULT attribute in funboost/core/broker_kind__exclusive_config_default.py.
+     """
+     broker_exclusive_config: dict = {}
+
 
-     broker_exclusive_config: dict = {}  # a place for configuration that is not common across middleware kinds, settings unique to each middleware; not every setting is compatible with every middleware, because the framework supports 30 kinds of message queues, and a message queue is more than the simple FIFO queue concept.
-     # For example kafka supports consumer groups, rabbitmq supports unique concepts such as various ack mechanisms and complex routing, some middleware natively supports message priority and some does not; every message queue has configuration parameters with their own meaning, and they can be passed here. The key-value pairs each middleware accepts can be seen in the consumer class's BROKER_EXCLUSIVE_CONFIG_DEFAULT
 
      should_check_publish_func_params: bool = True  # whether to validate the published message content at publish time, e.g. the function only accepts the parameters a and b but someone passes the wrong arguments or non-existent parameter names; if the consuming function has an extra decorator and you insist on writing *args, **kwargs, the publish-time argument check needs to be turned off
      publish_msg_log_use_full_msg: bool = False  # for the log of the message published to the queue, whether to show the full message body or only the function arguments.
@@ -228,9 +239,20 @@ class BoosterParams(BaseJsonAbleModel):
      COMMON_FUNCTION = 'COMMON_FUNCTION'
      """
 
+     """
+     user_options:
+     Extra user-defined configuration; advanced users or users with unusual requirements can make use of it, freely storing any settings.
+     user_options provides a unified, user-defined namespace that lets users pass configuration for their own unusual requirements or advanced customization without waiting for the framework author to add official support.
+     funboost is a framework of freedom, not servitude: the consuming-function logic is free, the directory structure is free, and quirky custom extensions should also be free; users do not need to modify the funboost BoosterParams source to add decorator parameters.
+
+     For usage scenarios see documentation section 4b.6.
+     """
+     user_options: dict = {}  # user-defined configuration; advanced users or unusual requirements can use it freely to store any settings, e.g. read it inside a consumer_override_cls or use it together with register_custom_broker
+
+
      auto_generate_info: dict = {}  # auto-generated information; the user does not need to pass it.
-
-
+
+
 
      @root_validator(skip_on_failure=True, )
      def check_values(cls, values: dict):
@@ -358,6 +380,8 @@ class PublisherParams(BaseJsonAbleModel):
      publish_msg_log_use_full_msg: bool = False  # for the log of the message published to the queue, whether to show the full message body or only the function arguments.
      consuming_function_kind: typing.Optional[str] = None  # auto-generated information; the user does not need to pass it.
      rpc_timeout: int = 1800  # in RPC mode, the timeout for waiting for the RPC result to return
+     user_options: dict = {}  # user-defined configuration; advanced users or unusual requirements can use it freely to store any settings.
+
 
  if __name__ == '__main__':
      from funboost.concurrent_pool import FlexibleThreadPool
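A hedged sketch (queue name and option keys are hypothetical) of the two BoosterParams fields touched above: broker_exclusive_config for broker-specific settings, and the new user_options free-form namespace:

    from funboost import boost, BoosterParams, BrokerEnum

    @boost(BoosterParams(
        queue_name='demo_priority_queue',
        broker_kind=BrokerEnum.RABBITMQ_AMQPSTORM,
        broker_exclusive_config={'x-max-priority': 3},   # must be one of the keys registered for this broker
        user_options={'owner': 'team-data'},             # free-form; read it back from custom consumer/publisher code
    ))
    def f(x):
        return x * 2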
funboost/core/helper_funs.py CHANGED
@@ -3,7 +3,7 @@ import pytz
  import time
  import uuid
  import datetime
- from funboost.core.funboost_time import FunboostTime, get_now_time_str_by_tz
+ from funboost.core.funboost_time import FunboostTime, fast_get_now_time_str
 
 
  def get_publish_time(paramsx: dict):
@@ -57,13 +57,13 @@ class MsgGenerater:
      def generate_publish_time() -> float:
          return round(time.time(),4)
 
-     # @staticmethod  # poor performance
-     # def generate_publish_time_format() -> str:
-     #     return FunboostTime().get_str()
-
+
      @staticmethod
      def generate_publish_time_format() -> str:
-         return get_now_time_str_by_tz()
+         # return FunboostTime().get_str()  # poor performance
+         # return get_now_time_str_by_tz()  # 2 seconds per 1,000,000 calls
+         return fast_get_now_time_str()  # 0.4 seconds per 1,000,000 calls
+
 
      @classmethod
      def generate_pulish_time_and_task_id(cls,queue_name:str,task_id=None):
@@ -81,8 +81,9 @@
      print(FunboostTime())
      for i in range(1000000):
          # time.time()
-         # MsgGenerater.generate_publish_time_format()
+         MsgGenerater.generate_publish_time_format()
+         # FunboostTime().get_str()
 
-         datetime.datetime.now(tz=pytz.timezone(FunboostCommonConfig.TIMEZONE)).strftime(FunboostTime.FORMATTER_DATETIME_NO_ZONE)
+         # datetime.datetime.now(tz=pytz.timezone(FunboostCommonConfig.TIMEZONE)).strftime(FunboostTime.FORMATTER_DATETIME_NO_ZONE)
 
      print(FunboostTime())
funboost/core/msg_result_getter.py CHANGED
@@ -1,4 +1,5 @@
  import asyncio
+ import threading
  import time
 
  import typing
@@ -278,6 +279,32 @@ class ResultFromMongo(MongoMixin):
          return (self.mongo_row or {}).get('result', NO_RESULT)
 
 
+ class FutureStatusResult:
+     """
+     Waiting for and notification of results in sync_call mode.
+     Uses threading.Event to implement the synchronous wait.
+     """
+     def __init__(self, call_type: str):
+         self.execute_finish_event = threading.Event()
+         self.staus_result_obj: FunctionResultStatus = None
+         self.call_type = call_type  # sync_call or publish
+
+     def set_finish(self):
+         """Mark the task as finished"""
+         self.execute_finish_event.set()
+
+     def wait_finish(self, rpc_timeout):
+         """Wait for the task to finish, with a timeout"""
+         return self.execute_finish_event.wait(rpc_timeout)
+
+     def set_staus_result_obj(self, staus_result_obj: FunctionResultStatus):
+         """Set the task execution result"""
+         self.staus_result_obj = staus_result_obj
+
+     def get_staus_result_obj(self):
+         """Get the task execution result"""
+         return self.staus_result_obj
+
  if __name__ == '__main__':
      print(ResultFromMongo('test_queue77h6_result:764a1ba2-14eb-49e2-9209-ac83fc5db1e8').get_status_and_result())
      print(ResultFromMongo('test_queue77h6_result:5cdb4386-44cc-452f-97f4-9e5d2882a7c1').get_result())
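For illustration only, a tiny driver of the FutureStatusResult helper above (the stand-in result object is hypothetical; the real wiring into funboost's sync_call path is not part of this hunk):

    import threading

    future = FutureStatusResult(call_type='sync_call')
    fake_status = object()  # stands in for the FunctionResultStatus the consumer would produce

    def finish_later():
        future.set_staus_result_obj(fake_status)
        future.set_finish()

    threading.Thread(target=finish_later).start()
    if future.wait_finish(rpc_timeout=10):   # returns True if set_finish() happened within the timeout
        print(future.get_staus_result_obj())
    else:
        print('timed out waiting for the result')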
funboost/factories/broker_kind__publsiher_consumer_type_map.py CHANGED
@@ -1,7 +1,7 @@
  import typing
 
  from funboost.publishers.empty_publisher import EmptyPublisher
- from funboost.publishers.http_publisher import HTTPPublisher
+
  from funboost.publishers.nats_publisher import NatsPublisher
  from funboost.publishers.peewee_publisher import PeeweePublisher
  from funboost.publishers.redis_publisher_lpush import RedisPublisherLpush
@@ -28,7 +28,7 @@ from funboost.publishers.httpsqs_publisher import HttpsqsPublisher
  from funboost.consumers.empty_consumer import EmptyConsumer
  from funboost.consumers.redis_consumer_priority import RedisPriorityConsumer
  from funboost.consumers.redis_pubsub_consumer import RedisPbSubConsumer
- from funboost.consumers.http_consumer import HTTPConsumer
+
  from funboost.consumers.kafka_consumer import KafkaConsumer
  from funboost.consumers.local_python_queue_consumer import LocalPythonQueueConsumer
  from funboost.consumers.mongomq_consumer import MongoMqConsumer
@@ -74,7 +74,7 @@ broker_kind__publsiher_consumer_type_map = {
      BrokerEnum.HTTPSQS: (HttpsqsPublisher, HttpsqsConsumer),
      BrokerEnum.UDP: (UDPPublisher, UDPConsumer),
      BrokerEnum.TCP: (TCPPublisher, TCPConsumer),
-     BrokerEnum.HTTP: (HTTPPublisher, HTTPConsumer),
+
      BrokerEnum.NATS: (NatsPublisher, NatsConsumer),
      BrokerEnum.TXT_FILE: (TxtFilePublisher, TxtFileConsumer),
      BrokerEnum.PEEWEE: (PeeweePublisher, PeeweeConsumer),
@@ -115,6 +115,11 @@ def regist_to_funboost(broker_kind: str):
          from funboost.consumers.rabbitmq_amqpstorm_consumer import RabbitmqConsumerAmqpStorm
          register_custom_broker(BrokerEnum.RABBITMQ_AMQPSTORM, RabbitmqPublisherUsingAmqpStorm, RabbitmqConsumerAmqpStorm)
 
+     if broker_kind == BrokerEnum.RABBITMQ_COMPLEX_ROUTING:
+         from funboost.publishers.rabbitmq_complex_routing_publisher import RabbitmqComplexRoutingPublisher
+         from funboost.consumers.rabbitmq_complex_routing_consumer import RabbitmqComplexRoutingConsumer
+         register_custom_broker(BrokerEnum.RABBITMQ_COMPLEX_ROUTING, RabbitmqComplexRoutingPublisher, RabbitmqComplexRoutingConsumer)
+
      if broker_kind == BrokerEnum.RABBITMQ_RABBITPY:
          from funboost.publishers.rabbitmq_rabbitpy_publisher import RabbitmqPublisherUsingRabbitpy
          from funboost.consumers.rabbitmq_rabbitpy_consumer import RabbitmqConsumerRabbitpy
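A hedged sketch of the newly registered RABBITMQ_COMPLEX_ROUTING broker (queue and exchange names are hypothetical); the available keys are the broker_exclusive_config defaults shown earlier in this diff:

    from funboost import boost, BoosterParams, BrokerEnum

    @boost(BoosterParams(
        queue_name='fanout_demo_queue_a',
        broker_kind=BrokerEnum.RABBITMQ_COMPLEX_ROUTING,
        broker_exclusive_config={
            'exchange_name': 'demo_fanout_exchange',
            'exchange_type': 'fanout',   # routing_key_for_bind is ignored for fanout exchanges
        },
    ))
    def on_event(payload):
        print(payload)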
@@ -184,6 +189,11 @@ def regist_to_funboost(broker_kind: str):
          from funboost.consumers.mysql_cdc_consumer import MysqlCdcConsumer
          from funboost.publishers.mysql_cdc_publisher import MysqlCdcPublisher
          register_custom_broker(broker_kind, MysqlCdcPublisher, MysqlCdcConsumer)
+
+     if broker_kind == BrokerEnum.HTTP:
+         from funboost.consumers.http_consumer import HTTPConsumer
+         from funboost.publishers.http_publisher import HTTPPublisher
+         register_custom_broker(broker_kind, HTTPPublisher, HTTPConsumer)
 
  if __name__ == '__main__':
      import sys