funboost 50.1__py3-none-any.whl → 50.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of funboost might be problematic; review the release notes and the diff below for details.

funboost/__init__.py CHANGED
@@ -13,7 +13,7 @@ set_frame_config这个模块的 use_config_form_funboost_config_module() 是核
13
13
  这段注释说明和使用的用户无关,只和框架开发人员有关.
14
14
  '''
15
15
 
16
- __version__ = "50.1"
16
+ __version__ = "50.3"
17
17
 
18
18
  from funboost.set_frame_config import show_frame_config
19
19
 
@@ -168,8 +168,7 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
168
168
  # if self.consumer_params.concurrent_mode == ConcurrentModeEnum.ASYNC and self.consumer_params.specify_async_loop is None:
169
169
  # self.consumer_params.specify_async_loop= get_or_create_event_loop()
170
170
  self._lock_for_count_execute_task_times_every_unit_time = Lock()
171
- if self.consumer_params.concurrent_mode == ConcurrentModeEnum.ASYNC:
172
- self._async_lock_for_count_execute_task_times_every_unit_time = asyncio.Lock()
171
+
173
172
  # self._unit_time_for_count = 10 # 每隔多少秒计数,显示单位时间内执行多少次,暂时固定为10秒。
174
173
  # self._execute_task_times_every_unit_time = 0 # 每单位时间执行了多少次任务。
175
174
  # self._execute_task_times_every_unit_time_fail =0 # 每单位时间执行了多少次任务失败。
@@ -491,7 +490,6 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
491
490
  return msg
492
491
 
493
492
  def _submit_task(self, kw):
494
-
495
493
  kw['body'] = self._convert_msg_before_run(kw['body'])
496
494
  self._print_message_get_from_broker(kw['body'])
497
495
  if self._judge_is_daylight():
@@ -499,6 +497,7 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
499
497
  time.sleep(self.time_interval_for_check_do_not_run_time)
500
498
  return
501
499
  function_only_params = delete_keys_and_return_new_dict(kw['body'], )
500
+ kw['function_only_params'] = function_only_params
502
501
  if self._get_priority_conf(kw, 'do_task_filtering') and self._redis_filter.check_value_exists(
503
502
  function_only_params,self._get_priority_conf(kw, 'filter_str')): # 对函数的参数进行检查,过滤已经执行过并且成功的任务。
504
503
  self.logger.warning(f'redis的 [{self._redis_filter_key_name}] 键 中 过滤任务 {kw["body"]}')
@@ -640,6 +639,9 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
640
639
  def _frame_custom_record_process_info_func(self,current_function_result_status: FunctionResultStatus,kw:dict):
641
640
  pass
642
641
 
642
+ async def _aio_frame_custom_record_process_info_func(self,current_function_result_status: FunctionResultStatus,kw:dict):
643
+ pass
644
+
643
645
  def user_custom_record_process_info_func(self, current_function_result_status: FunctionResultStatus,): # 这个可以继承
644
646
  pass
645
647
 
@@ -688,7 +690,7 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
688
690
  max_retry_times = self._get_priority_conf(kw, 'max_retry_times')
689
691
  current_function_result_status = FunctionResultStatus(self.queue_name, self.consuming_function.__name__, kw['body'], )
690
692
  current_retry_times = 0
691
- function_only_params = delete_keys_and_return_new_dict(kw['body'])
693
+ function_only_params = kw['function_only_params']
692
694
  for current_retry_times in range(max_retry_times + 1):
693
695
  current_function_result_status.run_times = current_retry_times + 1
694
696
  current_function_result_status.run_status = RunStatus.running
@@ -747,10 +749,10 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
747
749
  # noinspection PyProtectedMember
748
750
  def _run_consuming_function_with_confirm_and_retry(self, kw: dict, current_retry_times,
749
751
  function_result_status: FunctionResultStatus, ):
750
- function_only_params = delete_keys_and_return_new_dict(kw['body']) if self._do_not_delete_extra_from_msg is False else kw['body']
752
+ function_only_params = kw['function_only_params'] if self._do_not_delete_extra_from_msg is False else kw['body']
751
753
  task_id = kw['body']['extra']['task_id']
752
754
  t_start = time.time()
753
- # function_result_status.run_times = current_retry_times + 1
755
+
754
756
  fct = funboost_current_task()
755
757
  fct_context = FctContext(function_params=function_only_params,
756
758
  full_msg=kw['body'],
@@ -837,17 +839,47 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
837
839
 
838
840
  function_result_status.result = FunctionResultStatus.FUNC_RUN_ERROR
839
841
  return function_result_status
840
-
842
+
843
+ def _gen_asyncio_objects(self):
844
+ if getattr(self, '_async_lock_for_count_execute_task_times_every_unit_time', None) is None:
845
+ self._async_lock_for_count_execute_task_times_every_unit_time = asyncio.Lock()
846
+
841
847
  # noinspection PyProtectedMember
842
848
  async def _async_run(self, kw: dict, ):
843
- # """虽然和上面有点大面积重复相似,这个是为了asyncio模式的,asyncio模式真的和普通同步模式的代码思维和形式区别太大,
844
- # 框架实现兼容async的消费函数很麻烦复杂,连并发池都要单独写"""
849
+ """
850
+ 虽然 async def _async_run 和上面的 def _run 有点大面积结构重复相似,这个是为了asyncio模式的,
851
+ asyncio模式真的和普通同步模式的代码思维和形式区别太大,
852
+ 框架实现兼容async的消费函数很麻烦复杂,连并发池都要单独写
853
+
854
+ _run 和 _async_run 无法合并成一个方法:
855
+ 因为在一个函数体内部,您无法根据条件来决定是否使用 await。
856
+
857
+ Python 语法不允许这样做:
858
+ # 伪代码,这是无效的
859
+ def _unified_run(self, kw, is_async):
860
+ # ...
861
+ if is_async:
862
+ await asyncio.sleep(1) # 'await' outside async function 经典报错
863
+ else:
864
+ time.sleep(1)
865
+
866
+ 不能在同步函数里面去写 await,只要一个函数里出现了 await,这个函数就必须被声明为 async def
867
+
868
+
869
+
870
+ funboost 这个代价算小了,为了支持异步的全流程生态包括发布/消费/获取rpc结果,对asyncio的累计专门投入代码不到500行.
871
+ 如果是celery 改造适配asyncio,起码要增加10倍以上的代码量,改5000行代码都搞不定支持真asyncio并发.
872
+ 我说的是支持兼容真asyncio并发,而不是每个线程内部搞个临时loop,然后临时loop.run_until_complete(用户async函数) 这种伪asyncio并发,
873
+ 真asyncio并发,是单个loop里面运行无数协程,
874
+ 伪asyncio并发是在每个线程启动一个临时的loop,每个loop仅仅运行一个协程,然后等待这个协程结束,这完全违背了 asyncio 的核心初心理念,这种比多线程性能本身还差.
875
+ """
845
876
  try:
877
+ self._gen_asyncio_objects()
846
878
  t_start_run_fun = time.time()
847
879
  max_retry_times = self._get_priority_conf(kw, 'max_retry_times')
848
880
  current_function_result_status = FunctionResultStatus(self.queue_name, self.consuming_function.__name__, kw['body'], )
849
881
  current_retry_times = 0
850
- function_only_params = delete_keys_and_return_new_dict(kw['body'])
882
+ function_only_params = kw['function_only_params']
851
883
  for current_retry_times in range(max_retry_times + 1):
852
884
  current_function_result_status.run_times = current_retry_times + 1
853
885
  current_function_result_status.run_status = RunStatus.running
@@ -888,11 +920,11 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
888
920
 
889
921
  if (current_function_result_status.success is False and current_retry_times == max_retry_times) or current_function_result_status.success is True:
890
922
  await simple_run_in_executor(push_result)
891
-
892
923
  async with self._async_lock_for_count_execute_task_times_every_unit_time:
893
924
  self.metric_calculation.cal(t_start_run_fun, current_function_result_status)
894
925
 
895
926
  self._frame_custom_record_process_info_func(current_function_result_status,kw)
927
+ await self._aio_frame_custom_record_process_info_func(current_function_result_status,kw)
896
928
  self.user_custom_record_process_info_func(current_function_result_status,) # 两种方式都可以自定义,记录结果.建议使用文档4.21.b的方式继承来重写
897
929
  await self.aio_user_custom_record_process_info_func(current_function_result_status,)
898
930
  if self.consumer_params.user_custom_record_process_info_func:
@@ -911,8 +943,8 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
911
943
  function_result_status: FunctionResultStatus, ):
912
944
  """虽然和上面有点大面积重复相似,这个是为了asyncio模式的,asyncio模式真的和普通同步模式的代码思维和形式区别太大,
913
945
  框架实现兼容async的消费函数很麻烦复杂,连并发池都要单独写"""
914
- function_only_params = delete_keys_and_return_new_dict(kw['body']) if self._do_not_delete_extra_from_msg is False else kw['body']
915
- function_result_status.run_times = current_retry_times + 1
946
+ function_only_params = kw['function_only_params'] if self._do_not_delete_extra_from_msg is False else kw['body']
947
+
916
948
  # noinspection PyBroadException
917
949
  t_start = time.time()
918
950
  fct = funboost_current_task()
@@ -9,6 +9,7 @@ from faststream import FastStream,Context
9
9
  from faststream.annotations import Logger
10
10
 
11
11
  from funboost.concurrent_pool.async_helper import simple_run_in_executor
12
+ from funboost.core.serialization import Serialization
12
13
  from funboost.core.helper_funs import delete_keys_and_return_new_dict
13
14
 
14
15
 
@@ -10,29 +10,12 @@ import time
10
10
  from funboost import FunctionResultStatus
11
11
  from funboost.assist.grpc_helper import funboost_grpc_pb2_grpc, funboost_grpc_pb2
12
12
  from funboost.consumers.base_consumer import AbstractConsumer
13
+ from funboost.core.msg_result_getter import FutureStatusResult
13
14
  from funboost.core.serialization import Serialization
14
15
  from funboost.core.exceptions import FunboostWaitRpcResultTimeout
15
16
  from funboost.concurrent_pool.flexible_thread_pool import FlexibleThreadPool
16
17
 
17
18
 
18
- class FutureStatusResult:
19
- def __init__(self,call_type:str):
20
- self.execute_finish_event = threading.Event()
21
- self.staus_result_obj: FunctionResultStatus = None
22
- self.call_type = call_type # sync_call or publish
23
-
24
- def set_finish(self):
25
- self.execute_finish_event.set()
26
-
27
- def wait_finish(self,rpc_timeout):
28
- return self.execute_finish_event.wait(rpc_timeout)
29
-
30
- def set_staus_result_obj(self, staus_result_obj:FunctionResultStatus):
31
- self.staus_result_obj = staus_result_obj
32
-
33
- def get_staus_result_obj(self):
34
- return self.staus_result_obj
35
-
36
19
 
37
20
 
38
21
 
@@ -1,19 +1,23 @@
1
1
  # -*- coding: utf-8 -*-
2
2
  # @Author : ydf
3
3
  # @Time : 2022/8/8 0008 13:32
4
- import asyncio
5
- import json
4
+ import logging
5
+ import threading
6
6
 
7
- # from aiohttp import web
8
- # from aiohttp.web_request import Request
7
+
8
+ from flask import Flask, request
9
9
 
10
10
  from funboost.consumers.base_consumer import AbstractConsumer
11
- from funboost.core.lazy_impoter import AioHttpImporter
11
+ from funboost.core.function_result_status_saver import FunctionResultStatus
12
+ from funboost.core.msg_result_getter import FutureStatusResult
13
+ from funboost.core.serialization import Serialization
14
+
15
+
12
16
 
13
17
 
14
18
  class HTTPConsumer(AbstractConsumer, ):
15
19
  """
16
- http 实现消息队列,不支持持久化,但不需要安装软件。
20
+ flask 作为消息队列实现 consumer
17
21
  """
18
22
  BROKER_EXCLUSIVE_CONFIG_DEFAULT = {'host': '127.0.0.1', 'port': None}
19
23
 
@@ -30,42 +34,105 @@ class HTTPConsumer(AbstractConsumer, ):
30
34
  if self._port is None:
31
35
  raise ValueError('please specify port')
32
36
 
33
- # noinspection DuplicatedCode
34
37
  def _shedual_task(self):
35
- # flask_app = Flask(__name__)
36
- #
37
- # @flask_app.route('/queue', methods=['post'])
38
- # def recv_msg():
39
- # msg = request.form['msg']
40
- # kw = {'body': json.loads(msg)}
41
- # self._submit_task(kw)
42
- # return 'finish'
43
- #
44
- # flask_app.run('0.0.0.0', port=self._port,debug=False)
38
+ """
39
+ 使用Flask实现HTTP服务器
40
+ 相比aiohttp,Flask是同步框架,避免了异步阻塞问题
41
+ """
42
+
45
43
 
46
- routes = AioHttpImporter().web.RouteTableDef()
44
+ # 创建Flask应用
45
+ flask_app = Flask(__name__)
46
+ # 关闭Flask的日志,避免干扰funboost的日志
47
+ flask_app.logger.disabled = True
48
+ logging.getLogger('werkzeug').disabled = True
49
+
50
+ @flask_app.route('/', methods=['GET'])
51
+ def hello():
52
+ """健康检查接口"""
53
+ return "Hello, from funboost (Flask version)"
54
+
55
+ @flask_app.route('/queue', methods=['POST'])
56
+ def recv_msg():
57
+ """
58
+ 接收消息的核心接口
59
+ 支持两种调用类型:
60
+ 1. publish: 异步发布,立即返回
61
+ 2. sync_call: 同步调用,等待结果返回
62
+ """
63
+ try:
64
+ # 获取请求数据
65
+ msg = request.form.get('msg')
66
+ call_type = request.form.get('call_type', 'publish')
67
+
68
+ if not msg:
69
+ return {"error": "msg parameter is required"}, 400
70
+
71
+ # 构造消息数据
72
+ kw = {
73
+ 'body': msg,
74
+ 'call_type': call_type,
75
+ }
76
+
77
+ if call_type == 'sync_call':
78
+ # 同步调用:需要等待执行结果
79
+ future_status_result = FutureStatusResult(call_type=call_type)
80
+ kw['future_status_result'] = future_status_result
81
+
82
+ # 提交任务到线程池执行
83
+ self._submit_task(kw)
84
+
85
+ # 等待任务完成(带超时)
86
+ if future_status_result.wait_finish(self.consumer_params.rpc_timeout):
87
+ # 返回执行结果
88
+ result = future_status_result.get_staus_result_obj()
89
+ return Serialization.to_json_str(
90
+ result.get_status_dict(without_datetime_obj=True)
91
+ )
92
+ else:
93
+ # 超时处理
94
+ self.logger.error(f'sync_call wait timeout after {self.consumer_params.rpc_timeout}s')
95
+ return {"error": "execution timeout"}, 408
96
+
97
+ else:
98
+ # 异步发布:直接提交任务,立即返回
99
+ self._submit_task(kw)
100
+ return "finish"
101
+
102
+ except Exception as e:
103
+ self.logger.error(f'处理HTTP请求时出错: {e}', exc_info=True)
104
+ return {"error": str(e)}, 500
105
+
106
+ # 启动Flask服务器
107
+ # 注意:Flask默认是单线程的,但funboost使用线程池处理任务,所以这里threaded=True
108
+ self.logger.info(f'启动Flask HTTP服务器,监听 {self._ip}:{self._port}')
47
109
 
48
- # noinspection PyUnusedLocal
49
- @routes.get('/')
50
- async def hello(request):
51
- return AioHttpImporter().web.Response(text="Hello, from funboost")
110
+ # flask_app.run(
111
+ # host='0.0.0.0', # 监听所有接口
112
+ # port=self._port,
113
+ # debug=False, # 生产环境关闭debug
114
+ # threaded=True, # 开启多线程支持
115
+ # use_reloader=False, # 关闭自动重载
116
+ # )
52
117
 
53
- @routes.post('/queue')
54
- async def recv_msg(request: AioHttpImporter().Request):
55
- data = await request.post()
56
- msg = data['msg']
57
- kw = {'body': msg}
58
- self._submit_task(kw)
59
- return AioHttpImporter().web.Response(text="finish")
118
+ import waitress
119
+ waitress.serve(flask_app, host='0.0.0.0', port=self._port,threads=self.consumer_params.concurrent_num)
60
120
 
61
- app = AioHttpImporter().web.Application()
62
- app.add_routes(routes)
63
- loop = asyncio.new_event_loop()
64
- asyncio.set_event_loop(loop)
65
- AioHttpImporter().web.run_app(app, host='0.0.0.0', port=self._port, )
121
+ def _frame_custom_record_process_info_func(self, current_function_result_status: FunctionResultStatus, kw: dict):
122
+ """
123
+ 任务执行完成后的回调函数
124
+ 对于sync_call模式,需要通知等待的HTTP请求
125
+ """
126
+ if kw['call_type'] == "sync_call":
127
+ future_status_result: FutureStatusResult = kw['future_status_result']
128
+ future_status_result.set_staus_result_obj(current_function_result_status)
129
+ future_status_result.set_finish()
130
+ # self.logger.info('sync_call任务执行完成,通知HTTP请求返回结果')
66
131
 
67
132
  def _confirm_consume(self, kw):
68
- pass # 没有确认消费的功能。
133
+ """HTTP模式没有确认消费的功能"""
134
+ pass
69
135
 
70
136
  def _requeue(self, kw):
137
+ """HTTP模式没有重新入队的功能"""
71
138
  pass
@@ -0,0 +1,113 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Author : ydf
3
+ # @Time : 2022/8/8 0008 13:32
4
+ import asyncio
5
+ import json
6
+
7
+ # from aiohttp import web
8
+ # from aiohttp.web_request import Request
9
+
10
+ from funboost.consumers.base_consumer import AbstractConsumer
11
+ from funboost.core.function_result_status_saver import FunctionResultStatus
12
+ from funboost.core.lazy_impoter import AioHttpImporter
13
+ from funboost.core.serialization import Serialization
14
+
15
+
16
+ class AioFutureStatusResult:
17
+ def __init__(self,call_type:str):
18
+ self.execute_finish_event = asyncio.Event()
19
+ self.staus_result_obj: FunctionResultStatus = None
20
+ self.call_type = call_type # sync_call or publish
21
+
22
+ def set_finish(self):
23
+ self.execute_finish_event.set()
24
+
25
+ async def wait_finish(self,rpc_timeout):
26
+ return await self.execute_finish_event.wait()
27
+
28
+ def set_staus_result_obj(self, staus_result_obj:FunctionResultStatus):
29
+ self.staus_result_obj = staus_result_obj
30
+
31
+ def get_staus_result_obj(self):
32
+ return self.staus_result_obj
33
+
34
+ class HTTPConsumer(AbstractConsumer, ):
35
+ """
36
+ aiohttp 实现消息队列,不支持持久化,但不需要安装软件。
37
+ """
38
+ BROKER_EXCLUSIVE_CONFIG_DEFAULT = {'host': '127.0.0.1', 'port': None}
39
+
40
+ # noinspection PyAttributeOutsideInit
41
+ def custom_init(self):
42
+ # try:
43
+ # self._ip, self._port = self.queue_name.split(':')
44
+ # self._port = int(self._port)
45
+ # except BaseException as e:
46
+ # self.logger.critical(f'http作为消息队列时候,队列名字必须设置为 例如 192.168.1.101:8200 这种, ip:port')
47
+ # raise e
48
+ self._ip = self.consumer_params.broker_exclusive_config['host']
49
+ self._port = self.consumer_params.broker_exclusive_config['port']
50
+ if self._port is None:
51
+ raise ValueError('please specify port')
52
+
53
+ # noinspection DuplicatedCode
54
+ def _shedual_task(self):
55
+ # flask_app = Flask(__name__)
56
+ #
57
+ # @flask_app.route('/queue', methods=['post'])
58
+ # def recv_msg():
59
+ # msg = request.form['msg']
60
+ # kw = {'body': json.loads(msg)}
61
+ # self._submit_task(kw)
62
+ # return 'finish'
63
+ #
64
+ # flask_app.run('0.0.0.0', port=self._port,debug=False)
65
+
66
+ routes = AioHttpImporter().web.RouteTableDef()
67
+
68
+ # noinspection PyUnusedLocal
69
+ @routes.get('/')
70
+ async def hello(request):
71
+ return AioHttpImporter().web.Response(text="Hello, from funboost")
72
+
73
+ @routes.post('/queue')
74
+ async def recv_msg(request: AioHttpImporter().Request):
75
+ data = await request.post()
76
+ msg = data['msg']
77
+ call_type = data['call_type']
78
+ kw = {'body': msg,'call_type': call_type,}
79
+ if call_type == 'sync_call':
80
+ aio_future_status_result = AioFutureStatusResult(call_type=call_type)
81
+ kw['aio_future_status_result'] = aio_future_status_result
82
+ self._submit_task(kw)
83
+ if data['call_type'] == 'sync_call':
84
+ await aio_future_status_result.wait_finish(self.consumer_params.rpc_timeout)
85
+ return AioHttpImporter().web.Response(text=Serialization.to_json_str(
86
+ aio_future_status_result.get_staus_result_obj().get_status_dict(without_datetime_obj=True)))
87
+ return AioHttpImporter().web.Response(text="finish")
88
+
89
+ app = AioHttpImporter().web.Application()
90
+ app.add_routes(routes)
91
+ loop = asyncio.new_event_loop()
92
+ asyncio.set_event_loop(loop)
93
+ AioHttpImporter().web.run_app(app, host='0.0.0.0', port=self._port, )
94
+
95
+ def _frame_custom_record_process_info_func(self,current_function_result_status: FunctionResultStatus,kw:dict):
96
+ if kw['call_type'] == "sync_call":
97
+ aio_future_status_result: AioFutureStatusResult = kw['aio_future_status_result']
98
+ aio_future_status_result.set_staus_result_obj(current_function_result_status)
99
+ aio_future_status_result.set_finish()
100
+ self.logger.info(f'aio_future_status_result.set_finish()')
101
+
102
+ # async def _aio_frame_custom_record_process_info_func(self,current_function_result_status: FunctionResultStatus,kw:dict):
103
+ # self.logger.info(666666)
104
+ # if kw['call_type'] == "sync_call":
105
+ # aio_future_status_result: AioFutureStatusResult = kw['aio_future_status_result']
106
+ # aio_future_status_result.set_staus_result_obj(current_function_result_status)
107
+ # aio_future_status_result.set_finish()
108
+ # self.logger.info(f'aio_future_status_result.set_finish()')
109
+ def _confirm_consume(self, kw):
110
+ pass # 没有确认消费的功能。
111
+
112
+ def _requeue(self, kw):
113
+ pass
@@ -60,10 +60,7 @@ class KafkaConsumer(AbstractConsumer):
60
60
  # REMIND 好处是并发高。topic像翻书一样,随时可以设置偏移量重新消费。多个分组消费同一个主题,每个分组对相同主题的偏移量互不干扰 。
61
61
  for message in consumer:
62
62
  # 注意: message ,value都是原始的字节数据,需要decode
63
- if self.consumer_params.is_show_message_get_from_broker:
64
- self.logger.debug(
65
- f'从kafka的 [{message.topic}] 主题,分区 {message.partition} 中 取出的消息是: {message.value.decode()}')
66
- kw = {'consumer': consumer, 'message': message, 'body': message.value}
63
+ kw = {'consumer': consumer, 'message': message, 'body': message.value.decode('utf-8')}
67
64
  self._submit_task(kw)
68
65
 
69
66
  def _confirm_consume(self, kw):
@@ -205,12 +205,13 @@ class QueueConusmerParamsGetter(RedisMixin, FunboostFileLoggerMixin):
205
205
  def _inner():
206
206
  while True:
207
207
  t_start = time.time()
208
- # 这个函数确保只有一个地方在上报数据,避免重复上报
208
+ # 这个函数确保只有一个地方在上报数据,避免重复采集上报
209
209
  report_ts = self.timestamp()
210
210
  redis_report_uuid_ts_str = self.redis_db_frame.get(RedisKeys.FUNBOOST_LAST_GET_QUEUE_PARAMS_AND_ACTIVE_CONSUMERS_AND_REPORT__UUID_TS, )
211
211
  if redis_report_uuid_ts_str:
212
212
  redis_report_uuid_ts = Serialization.to_dict(redis_report_uuid_ts_str)
213
213
  if redis_report_uuid_ts['report_uuid'] != report_uuid and redis_report_uuid_ts['report_ts'] > report_ts - time_interval - 10 :
214
+ time.sleep(5) # 防止cpu空转
214
215
  continue
215
216
  self.redis_db_frame.set(RedisKeys.FUNBOOST_LAST_GET_QUEUE_PARAMS_AND_ACTIVE_CONSUMERS_AND_REPORT__UUID_TS,
216
217
  Serialization.to_json_str({'report_uuid':report_uuid, 'report_ts':report_ts}))
@@ -229,17 +230,38 @@ class QueueConusmerParamsGetter(RedisMixin, FunboostFileLoggerMixin):
229
230
  RedisKeys.gen_funboost_queue_time_series_data_key_by_queue_name(queue),
230
231
  0, report_ts - 86400
231
232
  )
232
- self.logger.info(f'上报时序数据耗时 {time.time() - t_start} 秒')
233
+ self.logger.info(f'采集上报时序数据耗时 {time.time() - t_start} 秒')
233
234
 
234
235
  time.sleep(time_interval)
235
236
  threading.Thread(target=_inner, daemon=daemon).start()
236
237
 
237
- def get_time_series_data_by_queue_name(self,queue_name,start_ts=None,end_ts=None):
238
+ def get_time_series_data_by_queue_name(self,queue_name,start_ts=None,end_ts=None,curve_samples_count=None):
238
239
  res = self.redis_db_frame.zrangebyscore(
239
240
  RedisKeys.gen_funboost_queue_time_series_data_key_by_queue_name(queue_name),
240
241
  max(float(start_ts or 0),self.timestamp() - 86400) ,float(end_ts or -1),withscores=True)
241
242
  # print(res)
242
- return [{'report_data':Serialization.to_dict(item[0]),'report_ts':item[1]} for item in res]
243
+ series_data_all= [{'report_data':Serialization.to_dict(item[0]),'report_ts':item[1]} for item in res]
244
+ if curve_samples_count is None:
245
+ return series_data_all
246
+
247
+ # 曲线采样数量
248
+ total_count = len(series_data_all)
249
+ if total_count <= curve_samples_count:
250
+ # 如果原始数据量小于等于需要的样本数,直接返回全部数据
251
+ return series_data_all
252
+
253
+ # 计算采样步长
254
+ step = total_count / curve_samples_count
255
+ sampled_data = []
256
+
257
+ # 按照步长进行采样
258
+ for i in range(curve_samples_count):
259
+ index = int(i * step)
260
+ if index < total_count:
261
+ sampled_data.append(series_data_all[index])
262
+
263
+ return sampled_data
264
+
243
265
 
244
266
  if __name__ == '__main__':
245
267
  # print(Serialization.to_json_str(QueueConusmerParamsGetter().get_queue_params_and_active_consumers()))
funboost/core/booster.py CHANGED
@@ -1,6 +1,7 @@
1
1
  from __future__ import annotations
2
2
  import copy
3
3
  import inspect
4
+ from multiprocessing import Process
4
5
  import os
5
6
  import sys
6
7
  import types
@@ -242,10 +243,10 @@ class BoostersManager:
242
243
  """
243
244
 
244
245
  # pid_queue_name__booster_map字典存放 {(进程id,queue_name):Booster对象}
245
- pid_queue_name__booster_map = {} # type: typing.Dict[typing.Tuple[int,str],Booster]
246
+ pid_queue_name__booster_map :typing.Dict[typing.Tuple[int,str],Booster]= {}
246
247
 
247
248
  # queue_name__boost_params_consuming_function_map 字典存放 {queue_name,(@boost的入参字典,@boost装饰的消费函数)}
248
- queue_name__boost_params_map = {} # type: typing.Dict[str,BoosterParams]
249
+ queue_name__boost_params_map :typing.Dict[str,BoosterParams]= {}
249
250
 
250
251
  pid_queue_name__has_start_consume_set = set()
251
252
 
@@ -392,6 +393,34 @@ class BoostersManager:
392
393
 
393
394
  m_consume = multi_process_consume_queues
394
395
 
396
+ @classmethod
397
+ def consume_group(cls, booster_group:str,block=False):
398
+ """
399
+ 根据@boost装饰器的 booster_group消费分组名字,启动多个消费函数;
400
+ """
401
+ if booster_group is None:
402
+ raise ValueError('booster_group 不能为None')
403
+ need_consume_queue_names = []
404
+ for queue_name in cls.get_all_queues():
405
+ booster= cls.get_or_create_booster_by_queue_name(queue_name)
406
+ if booster.boost_params.booster_group == booster_group:
407
+ need_consume_queue_names.append(queue_name)
408
+ flogger.info(f'according to booster_group:{booster_group} ,start consume queues: {need_consume_queue_names}')
409
+ for queue_name in need_consume_queue_names:
410
+ cls.get_or_create_booster_by_queue_name(queue_name).consume()
411
+ if block:
412
+ ctrl_c_recv()
413
+
414
+ @classmethod
415
+ def multi_process_consume_group(cls, booster_group:str, process_num=1):
416
+ """
417
+ 根据@boost装饰器的 booster_group消费分组名字,启动多个消费函数;
418
+ """
419
+ for _ in range(process_num):
420
+ Process(target=cls.consume_group,args=(booster_group,True)).start()
421
+
422
+ m_consume_group = multi_process_consume_group
423
+
395
424
  @classmethod
396
425
  def multi_process_consume_all_queues(cls, process_num=1):
397
426
  """