funboost 49.0-py3-none-any.whl → 49.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of funboost was flagged as potentially problematic.
Files changed (27)
  1. funboost/__init__.py +1 -1
  2. funboost/concurrent_pool/async_helper.py +20 -1
  3. funboost/concurrent_pool/async_pool_executor.py +40 -56
  4. funboost/concurrent_pool/backup/async_pool_executor_back.py +4 -2
  5. funboost/constant.py +27 -3
  6. funboost/consumers/base_consumer.py +11 -6
  7. funboost/core/active_cousumer_info_getter.py +51 -4
  8. funboost/function_result_web/__pycache__/app.cpython-313.pyc +0 -0
  9. funboost/function_result_web/__pycache__/app.cpython-37.pyc +0 -0
  10. funboost/function_result_web/__pycache__/functions.cpython-313.pyc +0 -0
  11. funboost/function_result_web/app.py +35 -7
  12. funboost/function_result_web/app_debug_start.py +4 -0
  13. funboost/function_result_web/templates/queue_op.html +149 -192
  14. funboost/function_result_web/templates/rpc_call.html +1 -1
  15. funboost/publishers/faststream_publisher.py +3 -2
  16. funboost/set_frame_config.py +2 -1
  17. funboost/timing_job/apscheduler_use_redis_store.py +8 -1
  18. funboost/timing_job/timing_job_base.py +21 -8
  19. funboost/timing_job/timing_push.py +26 -8
  20. funboost/utils/dependency_packages_in_pythonpath/aioredis/readme.md +0 -6
  21. funboost/utils/dependency_packages_in_pythonpath/readme.md +0 -6
  22. {funboost-49.0.dist-info → funboost-49.2.dist-info}/METADATA +7 -6
  23. {funboost-49.0.dist-info → funboost-49.2.dist-info}/RECORD +27 -25
  24. {funboost-49.0.dist-info → funboost-49.2.dist-info}/LICENSE +0 -0
  25. {funboost-49.0.dist-info → funboost-49.2.dist-info}/WHEEL +0 -0
  26. {funboost-49.0.dist-info → funboost-49.2.dist-info}/entry_points.txt +0 -0
  27. {funboost-49.0.dist-info → funboost-49.2.dist-info}/top_level.txt +0 -0
funboost/__init__.py CHANGED
@@ -13,7 +13,7 @@ use_config_form_funboost_config_module() in the set_frame_config module is the core
  This explanatory comment is only relevant to framework developers, not to users of the framework.
  '''

- __version__ = "49.0"
+ __version__ = "49.2"

  from funboost.set_frame_config import show_frame_config

funboost/concurrent_pool/async_helper.py CHANGED
@@ -9,6 +9,21 @@ async_executor_default = ThreadPoolExecutorShrinkAble(500)
  # async_executor_default = FlexibleThreadPool(50) # this one does not support the Future feature


+ def get_or_create_event_loop():
+     try:
+         # Python 3.7+
+         return asyncio.get_running_loop()
+     except RuntimeError:
+         # no loop is currently running
+         try:
+             # Python 3.6-3.9: get_event_loop creates a loop automatically
+             return asyncio.get_event_loop()
+         except RuntimeError:
+             # Python 3.10+: get_event_loop no longer creates one automatically
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+             return loop
+
  async def simple_run_in_executor(f, *args, async_executor: Executor = None, async_loop=None, **kwargs):
      """
      A powerful function that turns any synchronous function f into asyncio-style async code,
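The new get_or_create_event_loop() papers over the event-loop lookup changes across Python versions. A minimal usage sketch (not part of the diff; stdlib plus the helper added above):

    import asyncio
    import threading

    from funboost.concurrent_pool.async_helper import get_or_create_event_loop

    def worker():
        # asyncio.get_event_loop() would raise here (worker thread with no loop set);
        # the helper falls through and creates + installs a fresh loop instead.
        loop = get_or_create_event_loop()
        print(loop.run_until_complete(asyncio.sleep(0, result='ok')))  # -> ok

    threading.Thread(target=worker).start()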
@@ -31,13 +46,17 @@ async def simple_run_in_executor(f, *args, async_executor: Executor = None, asyn
      :param kwargs: keyword arguments passed to f
      :return:
      """
-     loopx = async_loop or asyncio.get_event_loop()
+     loopx = async_loop or get_or_create_event_loop()
      async_executorx = async_executor or async_executor_default
      # print(id(loopx))
      result = await loopx.run_in_executor(async_executorx, partial(f, *args, **kwargs))
      return result


+
+
+
+
  if __name__ == '__main__':
      import time
      import requests
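simple_run_in_executor awaits a blocking function on a thread pool, so a coroutine can call legacy synchronous code without stalling the loop. A minimal usage sketch (assumes only the funboost helper shown above):

    import asyncio
    import time

    from funboost.concurrent_pool.async_helper import simple_run_in_executor

    def blocking_add(a, b):
        time.sleep(1)  # stands in for any blocking call, e.g. requests.get
        return a + b

    async def main():
        # the blocking call runs in the shared thread pool; the event loop stays responsive
        print(await simple_run_in_executor(blocking_add, 1, 2))  # -> 3

    asyncio.run(main())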
funboost/concurrent_pool/async_pool_executor.py CHANGED
@@ -62,28 +62,27 @@ class AsyncPoolExecutorLtPy310(FunboostFileLoggerMixin,FunboostBaseConcurrentPoo
          self._size = size
          self.loop = loop or asyncio.new_event_loop()
          asyncio.set_event_loop(self.loop)
-         self._sem = asyncio.Semaphore(self._size, loop=self.loop)
-         self._queue = asyncio.Queue(maxsize=size, loop=self.loop)
-         self._lock = threading.Lock()
-         t = Thread(target=self._start_loop_in_new_thread, daemon=True)
+         self._diff_init()
+         # self._lock = threading.Lock()
+         t = Thread(target=self._start_loop_in_new_thread, daemon=False)
          # t.setDaemon(True) # made a daemon thread so atexit gets a chance to fire and the program exits on its own, without a manual shutdown call
          t.start()
-         self._can_be_closed_flag = False
-         atexit.register(self.shutdown)
-
-         self._event = threading.Event()
-         # print(self._event.is_set())
-         self._event.set()
-
-     def submit000(self, func, *args, **kwargs):
-         # this is more than 3x faster than the run_coroutine_threadsafe + result approach below.
-         with self._lock:
-             while 1:
-                 if not self._queue.full():
-                     self.loop.call_soon_threadsafe(self._queue.put_nowait, (func, args, kwargs))
-                     break
-                 else:
-                     time.sleep(0.01)
+
+
+     # def submit000(self, func, *args, **kwargs):
+     #     # this is more than 3x faster than the run_coroutine_threadsafe + result approach below.
+     #     with self._lock:
+     #         while 1:
+     #             if not self._queue.full():
+     #                 self.loop.call_soon_threadsafe(self._queue.put_nowait, (func, args, kwargs))
+     #                 break
+     #             else:
+     #                 time.sleep(0.01)
+
+     def _diff_init(self):
+         self._sem = asyncio.Semaphore(self._size, loop=self.loop)
+         self._queue = asyncio.Queue(maxsize=self._size, loop=self.loop)

      def submit(self, func, *args, **kwargs):
          future = asyncio.run_coroutine_threadsafe(self._produce(func, *args, **kwargs), self.loop)  # run_coroutine_threadsafe has a drawback too: it is very expensive.
@@ -114,51 +113,36 @@ class AsyncPoolExecutorLtPy310(FunboostFileLoggerMixin,FunboostBaseConcurrentPoo
          # self._loop.run_forever()

          # asyncio.set_event_loop(self.loop)
-         self.loop.run_until_complete(asyncio.wait([self._consume() for _ in range(self._size)], loop=self.loop))
-         self._can_be_closed_flag = True
+         # self.loop.run_until_complete(asyncio.wait([self._consume() for _ in range(self._size)], loop=self.loop))
+         # self._can_be_closed_flag = True
+         [self.loop.create_task(self._consume()) for _ in range(self._size)]
+         try:
+             self.loop.run_forever()
+         except Exception as e:
+             self.logger.warning(f'{e}')  # if several threads share one loop, the loop must not be started a second time, otherwise it raises.
+
+

-     def shutdown(self):
-         if self.loop.is_running():  # may be triggered by the atexit registration or called manually by the user; must check so the loop is not closed twice.
-             for i in range(self._size):
-                 self.submit(f'stop{i}', )
-             while not self._can_be_closed_flag:
-                 time.sleep(0.1)
-             self.loop.stop()
-             self.loop.close()
-             print('loop closed')
+     # def shutdown(self):
+     #     if self.loop.is_running():  # may be triggered by the atexit registration or called manually by the user; must check so the loop is not closed twice.
+     #         for i in range(self._size):
+     #             self.submit(f'stop{i}', )
+     #         while not self._can_be_closed_flag:
+     #             time.sleep(0.1)
+     #         self.loop.stop()
+     #         self.loop.close()
+     #         print('loop closed')



  class AsyncPoolExecutorGtPy310(AsyncPoolExecutorLtPy310):
-     # noinspection PyMissingConstructor
-     def __init__(self, size, loop=None):
-         """

-         :param size: number of coroutine tasks to run concurrently.
-         :param loop:
-         """
-         self._size = size
-         self.loop = loop or asyncio.new_event_loop()
-         self._sem = asyncio.Semaphore(self._size, )
-         self._queue = asyncio.Queue(maxsize=size, )
-         self._lock = threading.Lock()
-         t = Thread(target=self._start_loop_in_new_thread, daemon=True)
-         # t.setDaemon(True) # made a daemon thread so atexit gets a chance to fire and the program exits on its own, without a manual shutdown call
-         t.start()
-         self._can_be_closed_flag = False
-         atexit.register(self.shutdown)
+     def _diff_init(self):
+         self._sem = asyncio.Semaphore(self._size, )  # since python 3.10, many classes and methods have dropped the loop parameter
+         self._queue = asyncio.Queue(maxsize=self._size, )

-         self._event = threading.Event()
-         # print(self._event.is_set())
-         self._event.set()

-     def _start_loop_in_new_thread(self, ):
-         # self._loop.run_until_complete(self.__run()) # this works too.
-         # self._loop.run_forever()

-         # asyncio.set_event_loop(self.loop)
-         self.loop.run_until_complete(asyncio.wait([self.loop.create_task(self._consume()) for _ in range(self._size)], ))
-         self._can_be_closed_flag = True


  AsyncPoolExecutor = AsyncPoolExecutorLtPy310 if sys.version_info.minor < 10 else AsyncPoolExecutorGtPy310
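The refactor converges both classes on one pattern: only the version-sensitive Semaphore/Queue construction stays in _diff_init(), while a single private event loop runs forever in a background thread and submit() hands coroutines over with asyncio.run_coroutine_threadsafe. A standalone sketch of that pattern (stdlib only, not funboost code):

    import asyncio
    import threading

    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    async def job(n):
        await asyncio.sleep(0.1)
        return n * 2

    # hand the coroutine to the background loop from any thread;
    # .result() blocks until the coroutine completes
    future = asyncio.run_coroutine_threadsafe(job(21), loop)
    print(future.result())  # 42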
funboost/concurrent_pool/backup/async_pool_executor_back.py CHANGED
@@ -4,7 +4,9 @@ import threading
  import time
  import traceback
  from threading import Thread
- import nb_log  # noqa
+ import nb_log
+
+ from funboost.concurrent_pool.async_helper import get_or_create_event_loop  # noqa

  # if os.name == 'posix':
  #     import uvloop
@@ -215,7 +217,7 @@ class AsyncProducerConsumer:
              task.cancel()

      def start_run(self):
-         loop = asyncio.get_event_loop()
+         loop = get_or_create_event_loop()
          loop.run_until_complete(self.__run())
          # loop.close()
funboost/constant.py CHANGED
@@ -1,9 +1,9 @@
  # coding= utf-8
- from calendar import c
+


  class BrokerEnum:
-     EMPTY = 'empty'  # an empty implementation; meant to be combined with the boost parameters consumer_override_cls and publisher_override_cls, or to be subclassed.
+     EMPTY = 'EMPTY'  # an empty implementation; meant to be combined with the boost parameters consumer_override_cls and publisher_override_cls, or to be subclassed.

      RABBITMQ_AMQPSTORM = 'RABBITMQ_AMQPSTORM'  # uses the amqpstorm package to drive rabbitmq as the distributed message queue, with consume acknowledgement; strongly recommended as the funboost broker.
      RABBITMQ = RABBITMQ_AMQPSTORM
@@ -73,7 +73,7 @@ class BrokerEnum:
      PEEWEE = 'PEEWEE'  # uses the peewee package with mysql, simulating a message queue with a table

      CELERY = 'CELERY'  # funboost can publish and consume through the celery framework, letting celery schedule and run the tasks, while staying vastly simpler to write than using celery by hand,
-     # users never need to touch or care about the Celery instance, nor celery's task_routes and include settings; funboost applies these celery settings automatically.
+     # users never need to touch or care about the Celery instance, nor celery's task_routes and includes settings; funboost applies these celery settings automatically.

      DRAMATIQ = 'DRAMATIQ'  # funboost uses the dramatiq framework as the broker; dramatiq, like celery, is a task queue framework. users drive the dramatiq core scheduling through the funboost api.
@@ -108,7 +108,10 @@ class ConstStrForClassMethod:
      OBJ_INIT_PARAMS = 'obj_init_params'
      CLS_MODULE = 'cls_module'
      CLS_FILE = 'cls_file'
+
+
  class RedisKeys:
+
      REDIS_KEY_PAUSE_FLAG = 'funboost_pause_flag'
      REDIS_KEY_STOP_FLAG = 'funboost_stop_flag'
      QUEUE__MSG_COUNT_MAP = 'funboost_queue__msg_count_map'
@@ -117,10 +120,17 @@ class RedisKeys:
      FUNBOOST_QUEUE__RUN_FAIL_COUNT_MAP = 'funboost_queue__run_fail_count_map'
      FUNBOOST_ALL_QUEUE_NAMES = 'funboost_all_queue_names'
      FUNBOOST_ALL_IPS = 'funboost_all_ips'
+     FUNBOOST_LAST_GET_QUEUE_PARAMS_AND_ACTIVE_CONSUMERS_AND_REPORT__UUID_TS = 'funboost_last_get_queue_params_and_active_consumers_and_report__uuid_ts'
+

      FUNBOOST_HEARTBEAT_QUEUE__DICT_PREFIX = 'funboost_hearbeat_queue__dict:'
      FUNBOOST_HEARTBEAT_SERVER__DICT_PREFIX = 'funboost_hearbeat_server__dict:'

+
+     @staticmethod
+     def gen_funboost_apscheduler_redis_lock_key_by_queue_name(queue_name):
+         return f'funboost.BackgroundSchedulerProcessJobsWithinRedisLock:{queue_name}'
+
      @staticmethod
      def gen_funboost_hearbeat_queue__dict_key_by_queue_name(queue_name):
          return f'{RedisKeys.FUNBOOST_HEARTBEAT_QUEUE__DICT_PREFIX}{queue_name}'
@@ -128,3 +138,17 @@ class RedisKeys:
      @staticmethod
      def gen_funboost_hearbeat_server__dict_key_by_ip(ip):
          return f'{RedisKeys.FUNBOOST_HEARTBEAT_SERVER__DICT_PREFIX}{ip}'
+
+     @staticmethod
+     def gen_funboost_queue_time_series_data_key_by_queue_name(queue_name):
+         return f'funboost_queue_time_series_data:{queue_name}'
+
+     @staticmethod
+     def gen_funboost_redis_apscheduler_jobs_key_by_queue_name(queue_name):
+         jobs_key = f'funboost.apscheduler.jobs:{queue_name}'
+         return jobs_key
+
+     @staticmethod
+     def gen_funboost_redis_apscheduler_run_times_key_by_queue_name(queue_name):
+         run_times_key = f'funboost.apscheduler.run_times:{queue_name}'
+         return run_times_key
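These helpers centralize redis key naming, so every component derives identical keys for a given queue instead of building the strings inline at each call site. For illustration (assuming funboost is importable):

    from funboost.constant import RedisKeys

    print(RedisKeys.gen_funboost_queue_time_series_data_key_by_queue_name('queue_test_g03t'))
    # -> funboost_queue_time_series_data:queue_test_g03t
    print(RedisKeys.gen_funboost_redis_apscheduler_jobs_key_by_queue_name('queue_test_g03t'))
    # -> funboost.apscheduler.jobs:queue_test_g03t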
funboost/consumers/base_consumer.py CHANGED
@@ -58,7 +58,7 @@ from funboost.core.function_result_status_saver import ResultPersistenceHelper,

  from funboost.core.helper_funs import delete_keys_and_return_new_dict, get_publish_time, MsgGenerater

- from funboost.concurrent_pool.async_helper import simple_run_in_executor
+ from funboost.concurrent_pool.async_helper import get_or_create_event_loop, simple_run_in_executor
  from funboost.concurrent_pool.async_pool_executor import AsyncPoolExecutor
  # noinspection PyUnresolvedReferences
  from funboost.concurrent_pool.bounded_threadpoolexcutor import \
@@ -163,9 +163,13 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
          self._redis_filter_key_name = f'filter_zset:{consumer_params.queue_name}' if consumer_params.task_filtering_expire_seconds else f'filter_set:{consumer_params.queue_name}'
          filter_class = RedisFilter if consumer_params.task_filtering_expire_seconds == 0 else RedisImpermanencyFilter
          self._redis_filter = filter_class(self._redis_filter_key_name, consumer_params.task_filtering_expire_seconds)
-
+         self._redis_filter.delete_expire_filter_task_cycle()
+
+         # if self.consumer_params.concurrent_mode == ConcurrentModeEnum.ASYNC and self.consumer_params.specify_async_loop is None:
+         #     self.consumer_params.specify_async_loop = get_or_create_event_loop()
          self._lock_for_count_execute_task_times_every_unit_time = Lock()
-         self._async_lock_for_count_execute_task_times_every_unit_time = asyncio.Lock()
+         if self.consumer_params.concurrent_mode == ConcurrentModeEnum.ASYNC:
+             self._async_lock_for_count_execute_task_times_every_unit_time = asyncio.Lock()
          # self._unit_time_for_count = 10  # counting window in seconds for the per-unit-time execution stats, fixed at 10 seconds for now.
          # self._execute_task_times_every_unit_time = 0  # number of tasks executed per unit of time.
          # self._execute_task_times_every_unit_time_fail = 0  # number of failed tasks per unit of time.
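Guarding the asyncio.Lock() behind the ASYNC concurrent mode avoids instantiating it in consumers that never await: on Python 3.9 and earlier, asyncio.Lock() eagerly looks up an event loop at construction time, which can fail in a loop-less thread. A small sketch of that failure mode (stdlib only; the observed behavior varies by Python version):

    import asyncio
    import threading

    def init_like_a_non_async_consumer():
        try:
            asyncio.Lock()  # on Python <= 3.9 this eagerly calls get_event_loop()
            print('lock created without a running loop')  # Python 3.10+ binds the loop lazily
        except RuntimeError as e:  # raised in a loop-less non-main thread on older Pythons
            print('eager creation failed:', e)

    threading.Thread(target=init_like_a_non_async_consumer).start()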
@@ -398,14 +402,15 @@ class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
          if self.consumer_params.delay_task_apscheduler_jobstores_kind == 'redis':
              jobstores = {
                  "default": RedisJobStore(**redis_manager.get_redis_conn_kwargs(),
-                                          jobs_key=f'funboost.apscheduler.{self.queue_name}.jobs',
-                                          run_times_key=f'funboost.apscheduler.{self.queue_name}.run_times',
+                                          jobs_key=RedisKeys.gen_funboost_redis_apscheduler_jobs_key_by_queue_name(self.queue_name),
+                                          run_times_key=RedisKeys.gen_funboost_redis_apscheduler_run_times_key_by_queue_name(self.queue_name),
                                           )
              }
              self._delay_task_scheduler = FunboostBackgroundSchedulerProcessJobsWithinRedisLock(timezone=FunboostCommonConfig.TIMEZONE, daemon=False,
                                                                                                 jobstores=jobstores  # the push method's serialization carries a threading.Lock
                                                                                                 )
-             self._delay_task_scheduler.set_process_jobs_redis_lock_key(f'funboost.BackgroundSchedulerProcessJobsWithinRedisLock.{self.queue_name}')
+             self._delay_task_scheduler.set_process_jobs_redis_lock_key(
+                 RedisKeys.gen_funboost_apscheduler_redis_lock_key_by_queue_name(self.queue_name))
          elif self.consumer_params.delay_task_apscheduler_jobstores_kind == 'memory':
              jobstores = {"default": MemoryJobStore()}
              self._delay_task_scheduler = FsdfBackgroundScheduler(timezone=FunboostCommonConfig.TIMEZONE, daemon=False,
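Note the key format also changes here: 49.0 wrote funboost.apscheduler.<queue>.jobs, while the RedisKeys helpers produce funboost.apscheduler.jobs:<queue>, so persisted delay-task jobstores are effectively re-keyed. A minimal sketch of the jobstore wiring (assumes apscheduler and a local redis; the connection kwargs are illustrative, funboost actually passes redis_manager.get_redis_conn_kwargs()):

    from apscheduler.jobstores.redis import RedisJobStore
    from funboost.constant import RedisKeys

    queue_name = 'queue_test_g03t'
    jobstore = RedisJobStore(
        host='127.0.0.1', port=6379, db=0,  # illustrative connection kwargs
        jobs_key=RedisKeys.gen_funboost_redis_apscheduler_jobs_key_by_queue_name(queue_name),
        run_times_key=RedisKeys.gen_funboost_redis_apscheduler_run_times_key_by_queue_name(queue_name),
    )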
funboost/core/active_cousumer_info_getter.py CHANGED
@@ -1,6 +1,8 @@
  import json
+ import threading
  import time
  import typing
+ import uuid

  from pydantic import main

@@ -107,7 +109,7 @@ class ActiveCousumerProcessInfoGetter(RedisMixin, FunboostFileLoggerMixin):
          """Gets the active consumer process info of every queue, partitioned by queue name; scans redis keys automatically, no queue name needed. Do not store too many unrelated business keys in the redis db configured in funboost_config.py."""
          queue_names = self.get_all_queue_names()
          infos_map = self._get_all_hearbeat_info_partition_by_redis_keys([RedisKeys.gen_funboost_hearbeat_queue__dict_key_by_queue_name(queue_name) for queue_name in queue_names])
-         self.logger.info(f'active consumer process info of all queues, partitioned by queue name: {json.dumps(infos_map, indent=4)}')
+         # self.logger.info(f'active consumer process info of all queues, partitioned by queue name: {json.dumps(infos_map, indent=4)}')
          return infos_map

      def get_all_hearbeat_info_partition_by_ip(self) -> typing.Dict[typing.AnyStr, typing.List[typing.Dict]]:
@@ -157,7 +159,7 @@ class QueueConusmerParamsGetter(RedisMixin, FunboostFileLoggerMixin):
      def get_queue_params_and_active_consumers(self):
          queue__active_consumers_map = ActiveCousumerProcessInfoGetter().get_all_hearbeat_info_partition_by_queue_name()

-         queue_name_list = list(queue__active_consumers_map.keys())
+         # queue_name_list = list(queue__active_consumers_map.keys())
          queue__history_run_count_map = self.get_queues_history_run_count()
          queue__history_run_fail_count_map = self.get_queues_history_run_fail_count()
@@ -196,7 +198,52 @@ class QueueConusmerParamsGetter(RedisMixin, FunboostFileLoggerMixin):
                  'all_consumers_total_consume_count_from_start_fail': self._sum_filed_from_active_consumers(active_consumers, 'total_consume_count_from_start_fail'),
              }
          return queue_params_and_active_consumers
-
+
+     def cycle_get_queue_params_and_active_consumers_and_report(self, daemon=False):
+         time_interval = 10
+         report_uuid = str(uuid.uuid4())
+         def _inner():
+             while True:
+                 t_start = time.time()
+                 # this ensures only one process reports the data, to avoid duplicate reporting
+                 report_ts = self.timestamp()
+                 redis_report_uuid_ts_str = self.redis_db_frame.get(RedisKeys.FUNBOOST_LAST_GET_QUEUE_PARAMS_AND_ACTIVE_CONSUMERS_AND_REPORT__UUID_TS, )
+                 if redis_report_uuid_ts_str:
+                     redis_report_uuid_ts = Serialization.to_dict(redis_report_uuid_ts_str)
+                     if redis_report_uuid_ts['report_uuid'] != report_uuid and redis_report_uuid_ts['report_ts'] > report_ts - time_interval - 10:
+                         continue
+                 self.redis_db_frame.set(RedisKeys.FUNBOOST_LAST_GET_QUEUE_PARAMS_AND_ACTIVE_CONSUMERS_AND_REPORT__UUID_TS,
+                                         Serialization.to_json_str({'report_uuid': report_uuid, 'report_ts': report_ts}))
+
+                 queue_params_and_active_consumers = self.get_queue_params_and_active_consumers()
+                 for queue, item in queue_params_and_active_consumers.items():
+                     if len(item['active_consumers']) == 0:
+                         continue
+                     report_data = {k: v for k, v in item.items() if k not in ['queue_params', 'active_consumers']}
+
+                     report_data['report_ts'] = report_ts
+                     self.redis_db_frame.zadd(RedisKeys.gen_funboost_queue_time_series_data_key_by_queue_name(queue),
+                                              {Serialization.to_json_str(report_data): report_ts})
+                     # drop expired time-series data, keeping only the most recent day
+                     self.redis_db_frame.zremrangebyscore(
+                         RedisKeys.gen_funboost_queue_time_series_data_key_by_queue_name(queue),
+                         0, report_ts - 86400
+                     )
+                 self.logger.info(f'reporting the time-series data took {time.time() - t_start} seconds')
+
+                 time.sleep(time_interval)
+         threading.Thread(target=_inner, daemon=daemon).start()
+
+     def get_time_series_data_by_queue_name(self, queue_name, start_ts=None, end_ts=None):
+         res = self.redis_db_frame.zrangebyscore(
+             RedisKeys.gen_funboost_queue_time_series_data_key_by_queue_name(queue_name),
+             max(float(start_ts or 0), self.timestamp() - 86400), float(end_ts or -1), withscores=True)
+         # print(res)
+         return [{'report_data': Serialization.to_dict(item[0]), 'report_ts': item[1]} for item in res]

  if __name__ == '__main__':
-     print(Serialization.to_json_str(QueueConusmerParamsGetter().get_queue_params_and_active_consumers()))
+     # print(Serialization.to_json_str(QueueConusmerParamsGetter().get_queue_params_and_active_consumers()))
+     # QueueConusmerParamsGetter().cycle_get_queue_params_and_active_consumers_and_report()
+     print(QueueConusmerParamsGetter().get_time_series_data_by_queue_name('queue_test_g03t', 1749617883, 1749621483))
+     # print(QueueConusmerParamsGetter().get_time_series_data_by_queue_name('queue_test_g03t', ))
+
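The reporter above is a simple time series on a redis sorted set: each 10-second sample is a JSON blob whose score is its report timestamp, ZREMRANGEBYSCORE enforces a one-day retention window, reads are ZRANGEBYSCORE over a time range, and the uuid/timestamp handshake elects a single active reporter. A standalone sketch of the same storage pattern (assumes redis-py and a local redis server; not funboost code):

    import json
    import time

    import redis

    r = redis.Redis()
    key = 'funboost_queue_time_series_data:queue_test_g03t'

    sample = {'msg_num_in_broker': 42, 'report_ts': time.time()}
    r.zadd(key, {json.dumps(sample): sample['report_ts']})    # score = report timestamp
    r.zremrangebyscore(key, 0, time.time() - 86400)           # keep only the last day
    window = r.zrangebyscore(key, time.time() - 3600, '+inf', withscores=True)
    print([(json.loads(member), score) for member, score in window])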
funboost/function_result_web/app.py CHANGED
@@ -221,10 +221,7 @@ def hearbeat_info_partion_by_ip():
  def get_queue_params_and_active_consumers():
      return jsonify(QueueConusmerParamsGetter().get_queue_params_and_active_consumers())

- @app.route('/queue/message_count/<broker_kind>/<queue_name>')
- def get_message_count(broker_kind, queue_name):
-     publisher = BoostersManager.get_cross_project_publisher(PublisherParams(queue_name=queue_name, broker_kind=broker_kind, publish_msg_log_use_full_msg=True))
-     return jsonify({'count': publisher.get_message_count(), 'success': True})
+


  @app.route('/queue/clear/<broker_kind>/<queue_name>', methods=['POST'])
@@ -243,9 +240,39 @@ def resume_consume(queue_name):
      RedisMixin().redis_db_frame.hset(RedisKeys.REDIS_KEY_PAUSE_FLAG, queue_name, '0')
      return jsonify({'success': True})

- @app.route('/queue/get_msg_num', methods=['GET'])
- def get_msg_num():
+ @app.route('/queue/get_msg_num_all_queues', methods=['GET'])
+ def get_msg_num_all_queues():
+     """Backed by the counts consumers report to redis every 10 seconds, so it is fast; reads all queues' message counts straight from redis instead of querying each broker in real time."""
      return jsonify(QueueConusmerParamsGetter().get_msg_num(ignore_report_ts=True))
+
+ @app.route('/queue/message_count/<broker_kind>/<queue_name>')
+ def get_message_count(broker_kind, queue_name):
+     """Fetches one queue's message count from the broker in real time; slower, but always up to date."""
+     queue_params = QueueConusmerParamsGetter().get_queue_params()
+     for queue_namex, params in queue_params.items():
+         if params['broker_kind'] == broker_kind and queue_namex == queue_name:
+             publisher = BoostersManager.get_cross_project_publisher(
+                 PublisherParams(queue_name=queue_name,
+                                 broker_kind=broker_kind,
+                                 broker_exclusive_config=params['broker_exclusive_config'],
+                                 publish_msg_log_use_full_msg=True))
+             return jsonify({'count': publisher.get_message_count(), 'success': True})
+     return jsonify({'success': False, 'msg': f'queue {queue_name} does not exist'})
+
+ @app.route('/queue/get_time_series_data/<queue_name>', methods=['GET'])
+ def get_time_series_data_by_queue_name(queue_name, ):
+     """Returns the reported time-series samples of one queue, for example
+     [{'report_data': {'pause_flag': -1, 'msg_num_in_broker': 936748, 'history_run_count': '150180', 'history_run_fail_count': '46511', 'all_consumers_last_x_s_execute_count': 7, 'all_consumers_last_x_s_execute_count_fail': 0, 'all_consumers_last_x_s_avarage_function_spend_time': 3.441, 'all_consumers_avarage_function_spend_time_from_start': 4.598, 'all_consumers_total_consume_count_from_start': 1296, 'all_consumers_total_consume_count_from_start_fail': 314, 'report_ts': 1749617360.597841}, 'report_ts': 1749617360.597841}, {'report_data': {'pause_flag': -1, 'msg_num_in_broker': 936748, 'history_run_count': '150184', 'history_run_fail_count': '46514', 'all_consumers_last_x_s_execute_count': 7, 'all_consumers_last_x_s_execute_count_fail': 0, 'all_consumers_last_x_s_avarage_function_spend_time': 3.441, 'all_consumers_avarage_function_spend_time_from_start': 4.599, 'all_consumers_total_consume_count_from_start': 1299, 'all_consumers_total_consume_count_from_start_fail': 316, 'report_ts': 1749617370.628166}, 'report_ts': 1749617370.628166}]
+     """
+     return jsonify(QueueConusmerParamsGetter().get_time_series_data_by_queue_name(
+         queue_name, request.args.get('start_ts'), request.args.get('end_ts')))

  @app.route('/rpc/rpc_call', methods=['POST'])
  def rpc_call():
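Once the web manager is running, the new endpoint can be exercised over plain HTTP. A hypothetical client call (host and port from start_funboost_web_manager's defaults, queue name and window from the __main__ example above; requires the requests package):

    import requests

    resp = requests.get(
        'http://127.0.0.1:27018/queue/get_time_series_data/queue_test_g03t',
        params={'start_ts': 1749617883, 'end_ts': 1749621483},  # optional; the server keeps ~24h of samples
    )
    print(resp.json())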
@@ -280,6 +307,7 @@ def start_funboost_web_manager(host='0.0.0.0', port=27018,block=False):
      print('start_funboost_web_manager , sys.path :', sys.path)
      def _start_funboost_web_manager():
          app.run(debug=False, threaded=True, host=host, port=port)
+     QueueConusmerParamsGetter().cycle_get_queue_params_and_active_consumers_and_report()
      if block is True:
          _start_funboost_web_manager()
      else:
@@ -291,7 +319,7 @@ if __name__ == '__main__':
      # app.jinja_env.auto_reload = True
      # with app.test_request_context():
      #     print(url_for('query_cols_view'))
-
+     QueueConusmerParamsGetter().cycle_get_queue_params_and_active_consumers_and_report(daemon=True)
      app.run(debug=False, threaded=True, host='0.0.0.0', port=27018)


funboost/function_result_web/app_debug_start.py CHANGED
@@ -1,6 +1,10 @@
+
+
+ from funboost.core.active_cousumer_info_getter import QueueConusmerParamsGetter
  from funboost.function_result_web.app import app


  if __name__ == '__main__':
+     QueueConusmerParamsGetter().cycle_get_queue_params_and_active_consumers_and_report(daemon=True)
      app.run(debug=True, threaded=True, host='0.0.0.0', port=27019)