mdbq 4.2.14__py3-none-any.whl → 4.2.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of mdbq might be problematic; consult the package registry's advisory page for details.

mdbq/__version__.py CHANGED
@@ -1 +1 @@
1
- VERSION = '4.2.14'
1
+ VERSION = '4.2.15'
mdbq/route/monitor.py CHANGED
@@ -68,7 +68,6 @@ class RouteMonitor:
68
68
  self.queue_name = "api_monitor:tasks"
69
69
 
70
70
  # 线程锁(用于保护统计数据)
71
- import threading
72
71
  self._stats_lock = threading.Lock()
73
72
 
74
73
  # 统计信息
@@ -173,9 +172,9 @@ class RouteMonitor:
173
172
  `请求id` VARCHAR(64) NOT NULL COMMENT '请求唯一标识(用于追踪)',
174
173
  `请求时间` DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) COMMENT '请求时间,精确到毫秒',
175
174
  `请求方法` VARCHAR(10) NOT NULL COMMENT 'HTTP 方法(GET/POST/PUT/DELETE等)',
176
- `接口路径` VARCHAR(500) NOT NULL COMMENT 'API 接口路径',
175
+ `路由地址` VARCHAR(500) NOT NULL COMMENT 'API 路由地址',
177
176
  `客户端ip` VARCHAR(45) NOT NULL COMMENT '客户端 ip 地址(支持 IPv6)',
178
- `响应状态码` SMALLINT COMMENT 'HTTP 响应状态码',
177
+ `状态码` SMALLINT COMMENT 'HTTP 状态码',
179
178
  `响应耗时` DECIMAL(10,3) COMMENT '请求处理耗时(毫秒)',
180
179
  `用户标识` VARCHAR(64) COMMENT '用户id或标识(如有)',
181
180
  `用户代理` VARCHAR(500) COMMENT '浏览器 User-Agent(精简版)',
@@ -186,11 +185,11 @@ class RouteMonitor:
186
185
 
187
186
  UNIQUE KEY `uk_请求id` (`请求id`),
188
187
  INDEX `idx_请求时间` (`请求时间`),
189
- INDEX `idx_接口路径` (`接口路径`(191)),
188
+ INDEX `idx_路由地址` (`路由地址`(191)),
190
189
  INDEX `idx_客户端ip` (`客户端ip`),
191
- INDEX `idx_响应状态码` (`响应状态码`),
190
+ INDEX `idx_状态码` (`状态码`),
192
191
  INDEX `idx_用户标识` (`用户标识`),
193
- INDEX `idx_时间_接口` (`请求时间`, `接口路径`(191))
192
+ INDEX `idx_时间_接口` (`请求时间`, `路由地址`(191))
194
193
  ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
195
194
  COMMENT='API 访问日志表 - 记录每次请求的核心信息'
196
195
  ROW_FORMAT=COMPRESSED;
@@ -203,7 +202,7 @@ class RouteMonitor:
203
202
  `id` BIGINT AUTO_INCREMENT PRIMARY KEY COMMENT '主键,自增id',
204
203
  `统计日期` DATE NOT NULL COMMENT '统计日期',
205
204
  `统计小时` TINYINT NOT NULL COMMENT '统计小时(0-23)',
206
- `接口路径` VARCHAR(500) NOT NULL COMMENT 'API 接口路径',
205
+ `路由地址` VARCHAR(500) NOT NULL COMMENT 'API 路由地址',
207
206
  `请求方法` VARCHAR(10) NOT NULL COMMENT 'HTTP 请求方法',
208
207
  `请求总数` INT UNSIGNED DEFAULT 0 COMMENT '总请求次数',
209
208
  `成功次数` INT UNSIGNED DEFAULT 0 COMMENT '成功响应次数(状态码 < 400)',
@@ -215,10 +214,10 @@ class RouteMonitor:
215
214
  `创建时间` TIMESTAMP DEFAULT CURRENT_TIMESTAMP COMMENT '记录创建时间',
216
215
  `更新时间` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '记录更新时间',
217
216
 
218
- UNIQUE KEY `uk_日期_小时_接口_方法` (`统计日期`, `统计小时`, `接口路径`(191), `请求方法`),
217
+ UNIQUE KEY `uk_日期_小时_接口_方法` (`统计日期`, `统计小时`, `路由地址`(191), `请求方法`),
219
218
  INDEX `idx_统计日期` (`统计日期`),
220
- INDEX `idx_接口路径` (`接口路径`(191)),
221
- INDEX `idx_日期_接口` (`统计日期`, `接口路径`(191))
219
+ INDEX `idx_路由地址` (`路由地址`(191)),
220
+ INDEX `idx_日期_接口` (`统计日期`, `路由地址`(191))
222
221
  ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
223
222
  COMMENT='API 接口统计表 - 按小时汇总的接口性能数据';
224
223
  """)
@@ -487,7 +486,7 @@ class RouteMonitor:
487
486
  '请求id': request_id,
488
487
  '请求时间': datetime.now(),
489
488
  '请求方法': request.method,
490
- '接口路径': request.endpoint or request.path,
489
+ '路由地址': request.endpoint or request.path,
491
490
  '客户端ip': client_ip,
492
491
  '用户标识': user_id,
493
492
  '用户代理': user_agent,
@@ -521,8 +520,8 @@ class RouteMonitor:
521
520
  # 插入请求日志
522
521
  sql = """
523
522
  INSERT INTO `api_访问日志` (
524
- `请求id`, `请求时间`, `请求方法`, `接口路径`, `客户端ip`,
525
- `响应状态码`, `响应耗时`, `用户标识`, `用户代理`, `请求参数`, `错误信息`
523
+ `请求id`, `请求时间`, `请求方法`, `路由地址`, `客户端ip`,
524
+ `状态码`, `响应耗时`, `用户标识`, `用户代理`, `请求参数`, `错误信息`
526
525
  ) VALUES (
527
526
  %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
528
527
  )
@@ -532,9 +531,9 @@ class RouteMonitor:
532
531
  request_data.get('请求id'),
533
532
  request_data.get('请求时间'),
534
533
  request_data.get('请求方法'),
535
- request_data.get('接口路径'),
534
+ request_data.get('路由地址'),
536
535
  request_data.get('客户端ip'),
537
- request_data.get('响应状态码'),
536
+ request_data.get('状态码'),
538
537
  request_data.get('响应耗时'),
539
538
  request_data.get('用户标识'),
540
539
  request_data.get('用户代理'),
@@ -575,7 +574,7 @@ class RouteMonitor:
575
574
  now = datetime.now() # 用于IP统计的最后访问时间
576
575
 
577
576
  # 判断是否成功(状态码 < 400)
578
- status_code = request_data.get('响应状态码', 500)
577
+ status_code = request_data.get('状态码', 500)
579
578
  is_success = 1 if status_code < 400 else 0
580
579
  is_error = 1 if status_code >= 400 else 0
581
580
  response_time = request_data.get('响应耗时', 0)
@@ -583,7 +582,7 @@ class RouteMonitor:
583
582
  # 1. 更新接口统计表
584
583
  cursor.execute("""
585
584
  INSERT INTO `api_接口统计` (
586
- `统计日期`, `统计小时`, `接口路径`, `请求方法`,
585
+ `统计日期`, `统计小时`, `路由地址`, `请求方法`,
587
586
  `请求总数`, `成功次数`, `失败次数`,
588
587
  `平均耗时`, `最大耗时`, `最小耗时`
589
588
  ) VALUES (
@@ -604,7 +603,7 @@ class RouteMonitor:
604
603
  )
605
604
  """, (
606
605
  date, hour,
607
- request_data.get('接口路径', ''),
606
+ request_data.get('路由地址', ''),
608
607
  request_data.get('请求方法', ''),
609
608
  is_success, is_error,
610
609
  response_time, response_time, response_time,
@@ -699,7 +698,7 @@ class RouteMonitor:
699
698
  end_time = time.time()
700
699
  process_time = round((end_time - start_time) * 1000, 3) # 毫秒
701
700
 
702
- # 获取响应状态码
701
+ # 获取状态码
703
702
  response_status = 200
704
703
  if hasattr(response, 'status_code'):
705
704
  response_status = response.status_code
@@ -715,7 +714,7 @@ class RouteMonitor:
715
714
 
716
715
  # 更新响应数据
717
716
  response_data = {
718
- '响应状态码': response_status,
717
+ '状态码': response_status,
719
718
  '响应耗时': process_time,
720
719
  }
721
720
 
@@ -745,7 +744,7 @@ class RouteMonitor:
745
744
 
746
745
  # 构建错误数据
747
746
  error_data = {
748
- '响应状态码': 500,
747
+ '状态码': 500,
749
748
  '响应耗时': process_time,
750
749
  '错误信息': f"{type(e).__name__}: {str(e)}"
751
750
  }
@@ -807,7 +806,7 @@ class RouteMonitor:
807
806
  SUM(成功次数) as 成功次数,
808
807
  SUM(失败次数) as 失败次数,
809
808
  ROUND(AVG(平均耗时), 2) as 平均耗时,
810
- COUNT(DISTINCT 接口路径) as 接口数量
809
+ COUNT(DISTINCT 路由地址) as 接口数量
811
810
  FROM api_接口统计
812
811
  WHERE 统计日期 BETWEEN %s AND %s
813
812
  """, (start_date, end_date))
@@ -817,12 +816,12 @@ class RouteMonitor:
817
816
  # 2. 热门接口 TOP 10
818
817
  cursor.execute("""
819
818
  SELECT
820
- 接口路径,
819
+ 路由地址,
821
820
  SUM(请求总数) as 请求次数,
822
821
  ROUND(AVG(平均耗时), 2) as 平均耗时
823
822
  FROM api_接口统计
824
823
  WHERE 统计日期 BETWEEN %s AND %s
825
- GROUP BY 接口路径
824
+ GROUP BY 路由地址
826
825
  ORDER BY 请求次数 DESC
827
826
  LIMIT 10
828
827
  """, (start_date, end_date))
@@ -843,12 +842,12 @@ class RouteMonitor:
843
842
  # 4. 性能最慢的接口 TOP 5
844
843
  cursor.execute("""
845
844
  SELECT
846
- 接口路径,
845
+ 路由地址,
847
846
  ROUND(MAX(最大耗时), 2) as 最大耗时,
848
847
  ROUND(AVG(平均耗时), 2) as 平均耗时
849
848
  FROM api_接口统计
850
849
  WHERE 统计日期 BETWEEN %s AND %s
851
- GROUP BY 接口路径
850
+ GROUP BY 路由地址
852
851
  ORDER BY 最大耗时 DESC
853
852
  LIMIT 5
854
853
  """, (start_date, end_date))
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mdbq
3
- Version: 4.2.14
3
+ Version: 4.2.15
4
4
  Home-page: https://pypi.org/project/mdbq
5
5
  Author: xigua,
6
6
  Author-email: 2587125111@qq.com
@@ -1,5 +1,5 @@
1
1
  mdbq/__init__.py,sha256=Il5Q9ATdX8yXqVxtP_nYqUhExzxPC_qk_WXQ_4h0exg,16
2
- mdbq/__version__.py,sha256=cr0O8LE2OkusCNWIOsW9RXnz5146D47fEK2fLTbqnUU,18
2
+ mdbq/__version__.py,sha256=JI02MOsH9qq5B-OU52C1oxtIpP6VHv0R96xLUqhnZoQ,18
3
3
  mdbq/auth/__init__.py,sha256=pnPMAt63sh1B6kEvmutUuro46zVf2v2YDAG7q-jV_To,24
4
4
  mdbq/auth/auth_backend.py,sha256=iLN7AqiSq7fQgFtNtge_TIlVOR1hrCSZXH6oId6uGX4,116924
5
5
  mdbq/auth/crypto.py,sha256=M0i4dRljJuE30WH_13ythA2QGKPXZm6TgpnYp6aHOzw,17431
@@ -29,13 +29,12 @@ mdbq/redis/__init__.py,sha256=YtgBlVSMDphtpwYX248wGge1x-Ex_mMufz4-8W0XRmA,12
29
29
  mdbq/redis/getredis.py,sha256=vdg7YQEjhoMp5QzxygNGx5DQKRnePrcwPYgUrDypA6g,23672
30
30
  mdbq/redis/redis_cache.py,sha256=JWarX_l7LvdKyxtUNPANAqd-y20Jg5uqmllCbT-fyv8,45752
31
31
  mdbq/route/__init__.py,sha256=BT_dAY7V-U2o72bevq1B9Mq9QA7GodwtkxyLNdGaoE8,22
32
- mdbq/route/analytics.py,sha256=dngj5hVwKddEUy59nSYbOoJ9C7OVrtCmCkvW6Uj9RYM,28097
33
- mdbq/route/monitor.py,sha256=mY55Y2m0c_1fmC2A6GNWvkSbW3OsXPs3jOI-9HHTYMw,40738
32
+ mdbq/route/monitor.py,sha256=qMgIMD1knYQWOoAG0VgbPRE8Gcd673JnvMZkSsZlDGk,40653
34
33
  mdbq/route/routes.py,sha256=QVGfTvDgu0CpcKCvk1ra74H8uojgqTLUav1fnVAqLEA,29433
35
34
  mdbq/selenium/__init__.py,sha256=AKzeEceqZyvqn2dEDoJSzDQnbuENkJSHAlbHAD0u0ZI,10
36
35
  mdbq/selenium/get_driver.py,sha256=1NTlVUE6QsyjTrVVVqTO2LOnYf578ccFWlWnvIXGtic,20903
37
36
  mdbq/spider/__init__.py,sha256=RBMFXGy_jd1HXZhngB2T2XTvJqki8P_Fr-pBcwijnew,18
38
- mdbq-4.2.14.dist-info/METADATA,sha256=aNX05FUtA3v3lVAh0N2n7q8Wp-usLo5yXiL4H5qN9Lc,364
39
- mdbq-4.2.14.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
40
- mdbq-4.2.14.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
41
- mdbq-4.2.14.dist-info/RECORD,,
37
+ mdbq-4.2.15.dist-info/METADATA,sha256=gbbF14QD1DHVWxAdk4DK2ljrzRkhUZwZRqB8HGs1do0,364
38
+ mdbq-4.2.15.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
39
+ mdbq-4.2.15.dist-info/top_level.txt,sha256=2FQ-uLnCSB-OwFiWntzmwosW3X2Xqsg0ewh1axsaylA,5
40
+ mdbq-4.2.15.dist-info/RECORD,,
mdbq/route/analytics.py DELETED
@@ -1,619 +0,0 @@
1
- """
2
- 数据分析工具
3
-
4
- 主要功能:
5
- 1. 实时监控数据查询
6
- 2. 访问趋势分析
7
- 3. 性能分析报告
8
- 4. 异常检测和告警
9
- 5. 用户行为分析
10
-
11
- """
12
-
13
- import os
14
- import json
15
- import pymysql
16
- from datetime import datetime, timedelta
17
- from typing import Dict, Any, List, Optional
18
- from dbutils.pooled_db import PooledDB
19
- from mdbq.myconf import myconf
20
-
21
-
22
- class MonitorAnalytics:
23
- """监控数据分析类"""
24
-
25
- def __init__(self, database='api_monitor_logs'):
26
- """初始化分析工具"""
27
- self.database = database
28
- self.init_database_pool()
29
-
30
- def init_database_pool(self):
31
- """初始化数据库连接池"""
32
- dir_path = os.path.expanduser("~")
33
- config_file = os.path.join(dir_path, 'spd.txt')
34
- parser = myconf.ConfigParser()
35
-
36
- host, port, username, password = parser.get_section_values(
37
- file_path=config_file,
38
- section='mysql',
39
- keys=['host', 'port', 'username', 'password'],
40
- )
41
-
42
- self.pool = PooledDB(
43
- creator=pymysql,
44
- maxconnections=5, # 增加连接数避免冲突
45
- mincached=2, # 增加最小缓存连接数
46
- maxcached=5, # 增加最大缓存连接数
47
- blocking=True,
48
- host=host,
49
- port=int(port),
50
- user=username,
51
- password=password,
52
- database=self.database,
53
- ping=1,
54
- charset='utf8mb4',
55
- cursorclass=pymysql.cursors.DictCursor,
56
- # 添加连接超时设置
57
- connect_timeout=10,
58
- read_timeout=30,
59
- write_timeout=30
60
- )
61
-
62
- def get_realtime_metrics(self) -> Dict[str, Any]:
63
- """获取实时监控指标"""
64
- try:
65
- connection = self.pool.connection()
66
- try:
67
- with connection.cursor() as cursor:
68
- now = datetime.now()
69
- last_hour = now - timedelta(hours=1)
70
- last_day = now - timedelta(days=1)
71
-
72
- # 最近1小时的请求统计
73
- cursor.execute("""
74
- SELECT
75
- COUNT(*) as requests_last_hour,
76
- COUNT(DISTINCT client_ip) as unique_ips_last_hour,
77
- AVG(process_time) as avg_response_time,
78
- MAX(process_time) as max_response_time,
79
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) as error_count,
80
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) / COUNT(*) * 100 as error_rate,
81
- SUM(CASE WHEN is_bot = 1 THEN 1 ELSE 0 END) as bot_requests,
82
- SUM(CASE WHEN is_mobile = 1 THEN 1 ELSE 0 END) as mobile_requests
83
- FROM api_request_logs
84
- WHERE timestamp >= %s
85
- """, (last_hour,))
86
-
87
- hourly_stats = cursor.fetchone() or {}
88
-
89
- # 最近24小时趋势对比
90
- cursor.execute("""
91
- SELECT
92
- COUNT(*) as requests_last_day,
93
- COUNT(DISTINCT client_ip) as unique_ips_last_day,
94
- AVG(process_time) as avg_response_time_day
95
- FROM api_request_logs
96
- WHERE timestamp >= %s
97
- """, (last_day,))
98
-
99
- daily_stats = cursor.fetchone() or {}
100
-
101
- # 热门端点(最近1小时)
102
- cursor.execute("""
103
- SELECT endpoint, COUNT(*) as request_count,
104
- AVG(process_time) as avg_time
105
- FROM api_request_logs
106
- WHERE timestamp >= %s AND endpoint IS NOT NULL
107
- GROUP BY endpoint
108
- ORDER BY request_count DESC
109
- LIMIT 5
110
- """, (last_hour,))
111
-
112
- top_endpoints = cursor.fetchall()
113
-
114
- # 慢查询(最近1小时)
115
- cursor.execute("""
116
- SELECT endpoint, process_time, client_ip, timestamp
117
- FROM api_request_logs
118
- WHERE timestamp >= %s AND process_time > 5000
119
- ORDER BY process_time DESC
120
- LIMIT 10
121
- """, (last_hour,))
122
-
123
- slow_requests = cursor.fetchall()
124
-
125
- # 错误请求(最近1小时)
126
- cursor.execute("""
127
- SELECT endpoint, response_status, COUNT(*) as error_count
128
- FROM api_request_logs
129
- WHERE timestamp >= %s AND response_status >= 400
130
- GROUP BY endpoint, response_status
131
- ORDER BY error_count DESC
132
- LIMIT 10
133
- """, (last_hour,))
134
-
135
- error_requests = cursor.fetchall()
136
-
137
- return {
138
- 'realtime_metrics': {
139
- 'requests_per_hour': hourly_stats.get('requests_last_hour', 0),
140
- 'requests_per_day': daily_stats.get('requests_last_day', 0),
141
- 'unique_ips_hour': hourly_stats.get('unique_ips_last_hour', 0),
142
- 'unique_ips_day': daily_stats.get('unique_ips_last_day', 0),
143
- 'avg_response_time': round(hourly_stats.get('avg_response_time', 0) or 0, 2),
144
- 'max_response_time': round(hourly_stats.get('max_response_time', 0) or 0, 2),
145
- 'error_rate': round(hourly_stats.get('error_rate', 0) or 0, 2),
146
- 'error_count': hourly_stats.get('error_count', 0),
147
- 'bot_requests': hourly_stats.get('bot_requests', 0),
148
- 'mobile_requests': hourly_stats.get('mobile_requests', 0)
149
- },
150
- 'top_endpoints': top_endpoints,
151
- 'slow_requests': slow_requests,
152
- 'error_requests': error_requests,
153
- 'timestamp': now.isoformat()
154
- }
155
- finally:
156
- connection.close()
157
-
158
- except Exception as e:
159
- return {'error': str(e)}
160
-
161
- def get_traffic_trend(self, days: int = 7) -> Dict[str, Any]:
162
- """获取流量趋势分析"""
163
- try:
164
- connection = self.pool.connection()
165
- try:
166
- with connection.cursor() as cursor:
167
- end_date = datetime.now().date()
168
- start_date = end_date - timedelta(days=days)
169
-
170
- # 按小时统计(最近7天)
171
- cursor.execute("""
172
- SELECT
173
- DATE(timestamp) as date,
174
- HOUR(timestamp) as hour,
175
- COUNT(*) as requests,
176
- COUNT(DISTINCT client_ip) as unique_ips,
177
- AVG(process_time) as avg_response_time,
178
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) as errors
179
- FROM api_request_logs
180
- WHERE DATE(timestamp) BETWEEN %s AND %s
181
- GROUP BY DATE(timestamp), HOUR(timestamp)
182
- ORDER BY date, hour
183
- """, (start_date, end_date))
184
-
185
- hourly_data = cursor.fetchall()
186
-
187
- # 按天统计
188
- cursor.execute("""
189
- SELECT
190
- DATE(timestamp) as date,
191
- COUNT(*) as requests,
192
- COUNT(DISTINCT client_ip) as unique_ips,
193
- AVG(process_time) as avg_response_time,
194
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) as errors,
195
- SUM(CASE WHEN is_bot = 1 THEN 1 ELSE 0 END) as bot_requests,
196
- SUM(CASE WHEN is_mobile = 1 THEN 1 ELSE 0 END) as mobile_requests
197
- FROM api_request_logs
198
- WHERE DATE(timestamp) BETWEEN %s AND %s
199
- GROUP BY DATE(timestamp)
200
- ORDER BY date
201
- """, (start_date, end_date))
202
-
203
- daily_data = cursor.fetchall()
204
-
205
- # 周中模式分析
206
- cursor.execute("""
207
- SELECT
208
- DAYOFWEEK(timestamp) as day_of_week,
209
- DAYNAME(timestamp) as day_name,
210
- COUNT(*) as total_requests,
211
- AVG(process_time) as avg_response_time
212
- FROM api_request_logs
213
- WHERE DATE(timestamp) BETWEEN %s AND %s
214
- GROUP BY DAYOFWEEK(timestamp), DAYNAME(timestamp)
215
- ORDER BY day_of_week
216
- """, (start_date, end_date))
217
-
218
- weekly_pattern = cursor.fetchall()
219
-
220
- # 小时模式分析
221
- cursor.execute("""
222
- SELECT
223
- HOUR(timestamp) as hour,
224
- COUNT(*) as total_requests,
225
- AVG(process_time) as avg_response_time
226
- FROM api_request_logs
227
- WHERE DATE(timestamp) BETWEEN %s AND %s
228
- GROUP BY HOUR(timestamp)
229
- ORDER BY hour
230
- """, (start_date, end_date))
231
-
232
- hourly_pattern = cursor.fetchall()
233
-
234
- return {
235
- 'period': f'{start_date} to {end_date}',
236
- 'hourly_data': hourly_data,
237
- 'daily_data': daily_data,
238
- 'weekly_pattern': weekly_pattern,
239
- 'hourly_pattern': hourly_pattern
240
- }
241
- finally:
242
- connection.close()
243
-
244
- except Exception as e:
245
- return {'error': str(e)}
246
-
247
- def get_endpoint_analysis(self, days: int = 7) -> Dict[str, Any]:
248
- """获取端点性能分析"""
249
- try:
250
- connection = self.pool.connection()
251
- try:
252
- with connection.cursor() as cursor:
253
- end_date = datetime.now().date()
254
- start_date = end_date - timedelta(days=days)
255
-
256
- # 端点性能统计
257
- cursor.execute("""
258
- SELECT
259
- endpoint,
260
- COUNT(*) as total_requests,
261
- AVG(process_time) as avg_response_time,
262
- MIN(process_time) as min_response_time,
263
- MAX(process_time) as max_response_time,
264
- STDDEV(process_time) as response_time_stddev,
265
- COUNT(DISTINCT client_ip) as unique_users,
266
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) as error_count,
267
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) / COUNT(*) * 100 as error_rate,
268
- SUM(request_size) as total_request_size,
269
- SUM(response_size) as total_response_size
270
- FROM api_request_logs
271
- WHERE DATE(timestamp) BETWEEN %s AND %s
272
- AND endpoint IS NOT NULL
273
- GROUP BY endpoint
274
- ORDER BY total_requests DESC
275
- """, (start_date, end_date))
276
-
277
- endpoint_stats = cursor.fetchall()
278
-
279
- # 最慢的端点
280
- slowest_endpoints = sorted(
281
- [ep for ep in endpoint_stats if ep['avg_response_time']],
282
- key=lambda x: x['avg_response_time'] or 0,
283
- reverse=True
284
- )[:10]
285
-
286
- # 错误率最高的端点
287
- error_prone_endpoints = sorted(
288
- [ep for ep in endpoint_stats if (ep['error_rate'] or 0) > 0],
289
- key=lambda x: x['error_rate'] or 0,
290
- reverse=True
291
- )[:10]
292
-
293
- # 最热门的端点
294
- popular_endpoints = endpoint_stats[:10]
295
-
296
- return {
297
- 'period': f'{start_date} to {end_date}',
298
- 'all_endpoints': endpoint_stats,
299
- 'slowest_endpoints': slowest_endpoints,
300
- 'error_prone_endpoints': error_prone_endpoints,
301
- 'popular_endpoints': popular_endpoints
302
- }
303
- finally:
304
- connection.close()
305
-
306
- except Exception as e:
307
- return {'error': str(e)}
308
-
309
- def get_user_behavior_analysis(self, days: int = 7) -> Dict[str, Any]:
310
- """获取用户行为分析"""
311
- try:
312
- connection = self.pool.connection()
313
- try:
314
- with connection.cursor() as cursor:
315
- end_date = datetime.now().date()
316
- start_date = end_date - timedelta(days=days)
317
-
318
- # IP访问模式分析
319
- cursor.execute("""
320
- SELECT
321
- client_ip,
322
- COUNT(*) as total_requests,
323
- COUNT(DISTINCT endpoint) as unique_endpoints,
324
- COUNT(DISTINCT DATE(timestamp)) as active_days,
325
- MIN(timestamp) as first_access,
326
- MAX(timestamp) as last_access,
327
- AVG(process_time) as avg_response_time,
328
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) as errors,
329
- SUM(CASE WHEN is_bot = 1 THEN 1 ELSE 0 END) as bot_requests,
330
- user_agent
331
- FROM api_request_logs
332
- WHERE DATE(timestamp) BETWEEN %s AND %s
333
- GROUP BY client_ip, user_agent
334
- HAVING total_requests >= 10
335
- ORDER BY total_requests DESC
336
- LIMIT 50
337
- """, (start_date, end_date))
338
-
339
- ip_analysis = cursor.fetchall()
340
-
341
- # 设备类型统计
342
- cursor.execute("""
343
- SELECT
344
- browser_name,
345
- os_name,
346
- COUNT(*) as request_count,
347
- COUNT(DISTINCT client_ip) as unique_users
348
- FROM api_request_logs
349
- WHERE DATE(timestamp) BETWEEN %s AND %s
350
- AND browser_name != 'Unknown'
351
- GROUP BY browser_name, os_name
352
- ORDER BY request_count DESC
353
- """, (start_date, end_date))
354
-
355
- device_stats = cursor.fetchall()
356
-
357
- # 可疑活动检测
358
- cursor.execute("""
359
- SELECT
360
- client_ip,
361
- COUNT(*) as requests_per_hour,
362
- COUNT(DISTINCT endpoint) as endpoints_accessed,
363
- SUM(CASE WHEN response_status = 404 THEN 1 ELSE 0 END) as not_found_errors,
364
- SUM(CASE WHEN response_status = 403 THEN 1 ELSE 0 END) as forbidden_errors,
365
- MAX(is_bot) as is_bot
366
- FROM api_request_logs
367
- WHERE timestamp >= %s
368
- GROUP BY client_ip
369
- HAVING requests_per_hour > 100
370
- OR not_found_errors > 10
371
- OR forbidden_errors > 5
372
- ORDER BY requests_per_hour DESC
373
- """, (datetime.now() - timedelta(hours=1),))
374
-
375
- suspicious_activity = cursor.fetchall()
376
-
377
- # 用户会话分析
378
- cursor.execute("""
379
- SELECT
380
- session_id,
381
- COUNT(*) as session_requests,
382
- COUNT(DISTINCT endpoint) as endpoints_in_session,
383
- TIMESTAMPDIFF(MINUTE, MIN(timestamp), MAX(timestamp)) as session_duration,
384
- MIN(timestamp) as session_start,
385
- MAX(timestamp) as session_end
386
- FROM api_request_logs
387
- WHERE DATE(timestamp) BETWEEN %s AND %s
388
- AND session_id IS NOT NULL
389
- GROUP BY session_id
390
- HAVING session_requests >= 5
391
- ORDER BY session_duration DESC
392
- LIMIT 20
393
- """, (start_date, end_date))
394
-
395
- session_analysis = cursor.fetchall()
396
-
397
- return {
398
- 'period': f'{start_date} to {end_date}',
399
- 'ip_analysis': ip_analysis,
400
- 'device_statistics': device_stats,
401
- 'suspicious_activity': suspicious_activity,
402
- 'session_analysis': session_analysis
403
- }
404
- finally:
405
- connection.close()
406
-
407
- except Exception as e:
408
- return {'error': str(e)}
409
-
410
- def get_performance_alerts(self) -> Dict[str, Any]:
411
- """获取性能告警信息"""
412
- try:
413
- connection = self.pool.connection()
414
- try:
415
- with connection.cursor() as cursor:
416
- now = datetime.now()
417
- last_hour = now - timedelta(hours=1)
418
-
419
- alerts = []
420
-
421
- # 检查响应时间异常
422
- cursor.execute("""
423
- SELECT endpoint, AVG(process_time) as avg_time
424
- FROM api_request_logs
425
- WHERE timestamp >= %s AND process_time IS NOT NULL
426
- GROUP BY endpoint
427
- HAVING avg_time > 3000
428
- ORDER BY avg_time DESC
429
- """, (last_hour,))
430
-
431
- slow_endpoints = cursor.fetchall()
432
- for endpoint in slow_endpoints:
433
- alerts.append({
434
- 'type': 'SLOW_RESPONSE',
435
- 'severity': 'HIGH' if (endpoint['avg_time'] or 0) > 5000 else 'MEDIUM',
436
- 'message': f"端点 {endpoint['endpoint']} 平均响应时间 {endpoint['avg_time']:.0f}ms",
437
- 'timestamp': now.isoformat()
438
- })
439
-
440
- # 检查错误率异常
441
- cursor.execute("""
442
- SELECT
443
- endpoint,
444
- COUNT(*) as total,
445
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) as errors,
446
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) / COUNT(*) * 100 as error_rate
447
- FROM api_request_logs
448
- WHERE timestamp >= %s
449
- GROUP BY endpoint
450
- HAVING total >= 10 AND error_rate > 10
451
- ORDER BY error_rate DESC
452
- """, (last_hour,))
453
-
454
- error_endpoints = cursor.fetchall()
455
- for endpoint in error_endpoints:
456
- alerts.append({
457
- 'type': 'HIGH_ERROR_RATE',
458
- 'severity': 'HIGH' if (endpoint['error_rate'] or 0) > 20 else 'MEDIUM',
459
- 'message': f"端点 {endpoint['endpoint']} 错误率 {endpoint['error_rate']:.1f}%",
460
- 'timestamp': now.isoformat()
461
- })
462
-
463
- # 检查异常流量
464
- cursor.execute("""
465
- SELECT
466
- client_ip,
467
- COUNT(*) as request_count
468
- FROM api_request_logs
469
- WHERE timestamp >= %s
470
- GROUP BY client_ip
471
- HAVING request_count > 500
472
- ORDER BY request_count DESC
473
- """, (last_hour,))
474
-
475
- high_traffic_ips = cursor.fetchall()
476
- for ip_data in high_traffic_ips:
477
- alerts.append({
478
- 'type': 'HIGH_TRAFFIC',
479
- 'severity': 'MEDIUM',
480
- 'message': f"IP {ip_data['client_ip']} 请求量异常: {ip_data['request_count']} 次/小时",
481
- 'timestamp': now.isoformat()
482
- })
483
-
484
- # 检查系统整体负载
485
- cursor.execute("""
486
- SELECT COUNT(*) as total_requests
487
- FROM api_request_logs
488
- WHERE timestamp >= %s
489
- """, (last_hour,))
490
-
491
- total_requests = cursor.fetchone()['total_requests']
492
- if total_requests > 10000: # 每小时超过1万请求
493
- alerts.append({
494
- 'type': 'HIGH_SYSTEM_LOAD',
495
- 'severity': 'HIGH',
496
- 'message': f"系统负载异常: {total_requests} 请求/小时",
497
- 'timestamp': now.isoformat()
498
- })
499
-
500
- return {
501
- 'alerts': alerts,
502
- 'alert_count': len(alerts),
503
- 'high_severity_count': len([a for a in alerts if a['severity'] == 'HIGH']),
504
- 'timestamp': now.isoformat()
505
- }
506
- finally:
507
- connection.close()
508
-
509
- except Exception as e:
510
- return {'error': str(e)}
511
-
512
- def generate_daily_report(self, target_date: datetime = None) -> Dict[str, Any]:
513
- """生成日报告"""
514
- if target_date is None:
515
- target_date = datetime.now().date() - timedelta(days=1)
516
-
517
- try:
518
- connection = self.pool.connection()
519
- try:
520
- with connection.cursor() as cursor:
521
- # 整体统计
522
- cursor.execute("""
523
- SELECT
524
- COUNT(*) as total_requests,
525
- COUNT(DISTINCT client_ip) as unique_ips,
526
- COUNT(DISTINCT endpoint) as unique_endpoints,
527
- AVG(process_time) as avg_response_time,
528
- MAX(process_time) as max_response_time,
529
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) as total_errors,
530
- SUM(CASE WHEN response_status >= 400 THEN 1 ELSE 0 END) / COUNT(*) * 100 as error_rate,
531
- SUM(CASE WHEN is_bot = 1 THEN 1 ELSE 0 END) as bot_requests,
532
- SUM(CASE WHEN is_mobile = 1 THEN 1 ELSE 0 END) as mobile_requests,
533
- SUM(request_size) as total_request_size,
534
- SUM(response_size) as total_response_size
535
- FROM api_request_logs
536
- WHERE DATE(timestamp) = %s
537
- """, (target_date,))
538
-
539
- daily_summary = cursor.fetchone()
540
-
541
- # 热门端点
542
- cursor.execute("""
543
- SELECT endpoint, COUNT(*) as requests, AVG(process_time) as avg_time
544
- FROM api_request_logs
545
- WHERE DATE(timestamp) = %s
546
- GROUP BY endpoint
547
- ORDER BY requests DESC
548
- LIMIT 10
549
- """, (target_date,))
550
-
551
- top_endpoints = cursor.fetchall()
552
-
553
- # 错误统计
554
- cursor.execute("""
555
- SELECT response_status, COUNT(*) as count
556
- FROM api_request_logs
557
- WHERE DATE(timestamp) = %s AND response_status >= 400
558
- GROUP BY response_status
559
- ORDER BY count DESC
560
- """, (target_date,))
561
-
562
- error_breakdown = cursor.fetchall()
563
-
564
- # 流量分布(按小时)
565
- cursor.execute("""
566
- SELECT
567
- HOUR(timestamp) as hour,
568
- COUNT(*) as requests,
569
- AVG(process_time) as avg_time
570
- FROM api_request_logs
571
- WHERE DATE(timestamp) = %s
572
- GROUP BY HOUR(timestamp)
573
- ORDER BY hour
574
- """, (target_date,))
575
-
576
- hourly_distribution = cursor.fetchall()
577
-
578
- return {
579
- 'date': target_date.isoformat(),
580
- 'summary': daily_summary,
581
- 'top_endpoints': top_endpoints,
582
- 'error_breakdown': error_breakdown,
583
- 'hourly_distribution': hourly_distribution,
584
- 'generated_at': datetime.now().isoformat()
585
- }
586
- finally:
587
- connection.close()
588
-
589
- except Exception as e:
590
- return {'error': str(e)}
591
-
592
-
593
- # 全局分析实例
594
- analytics = MonitorAnalytics()
595
-
596
- # 导出分析函数
597
- def get_realtime_metrics():
598
- """获取实时监控指标"""
599
- return analytics.get_realtime_metrics()
600
-
601
- def get_traffic_trend(days: int = 7):
602
- """获取流量趋势"""
603
- return analytics.get_traffic_trend(days)
604
-
605
- def get_endpoint_analysis(days: int = 7):
606
- """获取端点分析"""
607
- return analytics.get_endpoint_analysis(days)
608
-
609
- def get_user_behavior_analysis(days: int = 7):
610
- """获取用户行为分析"""
611
- return analytics.get_user_behavior_analysis(days)
612
-
613
- def get_performance_alerts():
614
- """获取性能告警"""
615
- return analytics.get_performance_alerts()
616
-
617
- def generate_daily_report(target_date: datetime = None):
618
- """生成日报告"""
619
- return analytics.generate_daily_report(target_date)
File without changes