pytbox 0.0.1__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pytbox might be problematic. Click here for more details.

Files changed (68)
  1. pytbox/alert/alert_handler.py +139 -0
  2. pytbox/alert/ping.py +24 -0
  3. pytbox/alicloud/sls.py +9 -14
  4. pytbox/base.py +121 -0
  5. pytbox/categraf/build_config.py +143 -0
  6. pytbox/categraf/instances.toml +39 -0
  7. pytbox/categraf/jinja2/__init__.py +6 -0
  8. pytbox/categraf/jinja2/input.cpu/cpu.toml.j2 +5 -0
  9. pytbox/categraf/jinja2/input.disk/disk.toml.j2 +11 -0
  10. pytbox/categraf/jinja2/input.diskio/diskio.toml.j2 +6 -0
  11. pytbox/categraf/jinja2/input.dns_query/dns_query.toml.j2 +12 -0
  12. pytbox/categraf/jinja2/input.http_response/http_response.toml.j2 +9 -0
  13. pytbox/categraf/jinja2/input.mem/mem.toml.j2 +5 -0
  14. pytbox/categraf/jinja2/input.net/net.toml.j2 +11 -0
  15. pytbox/categraf/jinja2/input.net_response/net_response.toml.j2 +9 -0
  16. pytbox/categraf/jinja2/input.ping/ping.toml.j2 +11 -0
  17. pytbox/categraf/jinja2/input.prometheus/prometheus.toml.j2 +12 -0
  18. pytbox/categraf/jinja2/input.snmp/cisco_interface.toml.j2 +96 -0
  19. pytbox/categraf/jinja2/input.snmp/cisco_system.toml.j2 +41 -0
  20. pytbox/categraf/jinja2/input.snmp/h3c_interface.toml.j2 +96 -0
  21. pytbox/categraf/jinja2/input.snmp/h3c_system.toml.j2 +41 -0
  22. pytbox/categraf/jinja2/input.snmp/huawei_interface.toml.j2 +96 -0
  23. pytbox/categraf/jinja2/input.snmp/huawei_system.toml.j2 +41 -0
  24. pytbox/categraf/jinja2/input.snmp/ruijie_interface.toml.j2 +96 -0
  25. pytbox/categraf/jinja2/input.snmp/ruijie_system.toml.j2 +41 -0
  26. pytbox/categraf/jinja2/input.vsphere/vsphere.toml.j2 +211 -0
  27. pytbox/cli/__init__.py +7 -0
  28. pytbox/cli/categraf/__init__.py +7 -0
  29. pytbox/cli/categraf/commands.py +55 -0
  30. pytbox/cli/commands/vm.py +22 -0
  31. pytbox/cli/common/__init__.py +6 -0
  32. pytbox/cli/common/options.py +42 -0
  33. pytbox/cli/common/utils.py +269 -0
  34. pytbox/cli/formatters/__init__.py +7 -0
  35. pytbox/cli/formatters/output.py +155 -0
  36. pytbox/cli/main.py +24 -0
  37. pytbox/cli.py +9 -0
  38. pytbox/database/mongo.py +99 -0
  39. pytbox/database/victoriametrics.py +404 -0
  40. pytbox/dida365.py +11 -17
  41. pytbox/excel.py +64 -0
  42. pytbox/feishu/endpoints.py +12 -9
  43. pytbox/{logger.py → log/logger.py} +78 -30
  44. pytbox/{victorialog.py → log/victorialog.py} +2 -2
  45. pytbox/mail/alimail.py +142 -0
  46. pytbox/mail/client.py +171 -0
  47. pytbox/mail/mail_detail.py +30 -0
  48. pytbox/mingdao.py +164 -0
  49. pytbox/network/meraki.py +537 -0
  50. pytbox/notion.py +731 -0
  51. pytbox/pyjira.py +612 -0
  52. pytbox/utils/cronjob.py +79 -0
  53. pytbox/utils/env.py +2 -2
  54. pytbox/utils/load_config.py +132 -0
  55. pytbox/utils/load_vm_devfile.py +45 -0
  56. pytbox/utils/response.py +1 -1
  57. pytbox/utils/richutils.py +31 -0
  58. pytbox/utils/timeutils.py +479 -14
  59. pytbox/vmware.py +120 -0
  60. pytbox/win/ad.py +30 -0
  61. {pytbox-0.0.1.dist-info → pytbox-0.3.1.dist-info}/METADATA +13 -3
  62. pytbox-0.3.1.dist-info/RECORD +72 -0
  63. pytbox-0.3.1.dist-info/entry_points.txt +2 -0
  64. pytbox/common/base.py +0 -0
  65. pytbox/victoriametrics.py +0 -37
  66. pytbox-0.0.1.dist-info/RECORD +0 -21
  67. {pytbox-0.0.1.dist-info → pytbox-0.3.1.dist-info}/WHEEL +0 -0
  68. {pytbox-0.0.1.dist-info → pytbox-0.3.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,139 @@
1
#!/usr/bin/env python3


import uuid
from typing import Literal
from ..database.mongo import Mongo
from ..feishu.client import Client as FeishuClient
from ..dida365 import Dida365
from ..utils.timeutils import TimeUtils
from ..mail.client import MailClient


class AlertHandler:
    """Fan a single alert event out to the configured notification channels.

    An event is first recorded/updated in MongoDB (deduplicated via
    ``check_alarm_exist``), then forwarded to Feishu, e-mail and Dida365
    depending on the per-channel switches in ``config``. WeCom is a stub.
    """

    def __init__(self,
                 config: dict=None,
                 mongo_client: Mongo=None,
                 feishu_client: FeishuClient=None,
                 dida_client: Dida365=None,
                 mail_client: MailClient=None,
                 env: Literal['dev', 'prod']='prod'
                 ):
        """Store the injected clients and configuration.

        Args:
            config: nested settings dict; this class reads
                ``config['feishu']``, ``config['mail']``, ``config['dida']``
                and ``config['wecom']`` sections.
            mongo_client: Mongo wrapper whose ``collection`` holds alert docs.
            feishu_client: Feishu API client (``extensions.send_message_notify``).
            dida_client: Dida365 task client (create/update/complete).
            mail_client: SMTP-style client exposing ``send_mail``.
            env: 'dev' adds a "test alert" subtitle to Feishu cards.
        """
        self.config = config
        self.mongo = mongo_client
        self.feishu = feishu_client
        self.dida = dida_client
        self.mail = mail_client
        self.env = env

    def send_alert(self,
                   event_id: str=None,
                   event_type: Literal['trigger', 'resolved'] ='trigger',
                   event_time: str=None,
                   event_name: str=None,
                   event_content: str=None,
                   entity_name: str=None,
                   priority: Literal['critical', 'high', 'warning']='high',
                   resolved_expr: str=None,
                   suggestion: str='',
                   troubleshot: str='暂无',
                   mongo_id: str=None
                   ):
        """Record the event in Mongo and notify the enabled channels.

        Args:
            event_id: unique id; a uuid4 is generated when omitted.
            event_type: 'trigger' inserts a new alert doc; 'resolved'
                stamps ``resolved_time`` on the doc identified by *mongo_id*.
            event_time: event timestamp; defaults to "now" via
                ``TimeUtils.get_now_time_mongo()``.
            event_name / event_content / entity_name: human-readable fields
                copied into the stored doc and notification card.
            priority: severity label forwarded to Feishu/Dida.
            resolved_expr: stored with the doc (used by resolution checks elsewhere).
            suggestion / troubleshot: free-text hints shown in the card.
            mongo_id: required for event_type='resolved' — the ``_id`` of the
                original trigger document.
        """
        if not event_id:
            event_id = str(uuid.uuid4())
        if not event_time:
            event_time = TimeUtils.get_now_time_mongo()

        # NOTE(review): every side effect below is gated on this predicate —
        # confirm whether check_alarm_exist() is "may send" (not a duplicate)
        # rather than "already exists"; the name suggests the opposite.
        if self.mongo.check_alarm_exist(event_type=event_type, event_content=event_content):
            if event_type == "trigger":
                # Persist a fresh alert document keyed by event_id.
                self.mongo.collection.insert_one(
                    {
                        'event_id': event_id,
                        'event_type': event_type,
                        'event_name': event_name,
                        'event_time': event_time,
                        'event_content': event_content,
                        'entity_name': entity_name,
                        'priority': priority,
                        'resolved_expr': resolved_expr,
                        'suggestion': suggestion,
                        'troubleshot': troubleshot,
                    }
                )
            elif event_type == "resolved":
                # Mark the original trigger doc as resolved and fetch its
                # original event_time for display in the card.
                filter_doc = {"_id": mongo_id}
                update = {"$set": { "resolved_time": event_time}}
                self.mongo.collection.update_one(filter_doc, update)
                alarm_time = self.mongo.collection.find_one(filter_doc, {'event_time': 1})['event_time']

            # Markdown body shared by Feishu and Dida.
            # NOTE(review): timezone_offset is 0 for trigger but 8 for resolved
            # display of alarm_time — confirm this asymmetry is intentional.
            content = [
                f'**事件名称**: {event_name}',
                f'**告警时间**: {TimeUtils.convert_timeobj_to_str(timeobj=event_time, timezone_offset=0) if event_type == "trigger" else TimeUtils.convert_timeobj_to_str(timeobj=alarm_time, timezone_offset=8)}',
                f'**事件内容**: {event_content + " 已恢复" if event_type == "resolved" else event_content}',
                f'**告警实例**: {entity_name}',
                f'**建议**: {suggestion}',
                f'**故障排查**: {troubleshot}',
                f'**历史告警**: {self.mongo.recent_alerts(event_content=event_content)}'
            ]

            if event_type == "resolved":
                # Insert the recovery timestamp right after the alarm time line.
                content.insert(2, f'**恢复时间**: {TimeUtils.convert_timeobj_to_str(event_time, timezone_offset=0)}')

            if self.config['feishu']['enable_alert']:
                # Red card for triggers, green for recoveries.
                self.feishu.extensions.send_message_notify(
                    receive_id=self.config['feishu']['receive_id'],
                    color='red' if event_type == "trigger" else 'green',
                    title=event_content + " 已恢复" if event_type == "resolved" else event_content,
                    priority=priority,
                    sub_title='测试告警, 无需处理' if self.env == 'dev' else '',
                    content='\n'.join(content)
                )

            if self.config['mail']['enable_mail']:
                if event_type == "trigger":
                    self.mail.send_mail(
                        receiver=[self.config['mail']['mail_address']],
                        subject=f"{self.config['mail']['subject_trigger']}, {event_content}",
                        contents=f"event_content:{event_content}, alarm_time: {str(event_time)}, event_id: {event_id}, alarm_name: {event_name}, entity_name: {entity_name}, priority: {priority}, automate_ts: {troubleshot}, suggestion: {suggestion}"
                    )
                else:
                    # Resolved mail reports "now" as alarm_time, not the original.
                    self.mail.send_mail(
                        receiver=[self.config['mail']['mail_address']],
                        subject=f"{self.config['mail']['subject_resolved']}, {event_content}",
                        contents=f"event_content:{event_content}, alarm_time: {str(TimeUtils.get_now_time_mongo())}, event_id: {event_id}, alarm_name: {event_name}, entity_name: {entity_name}, priority: {priority}, automate_ts: {troubleshot}, suggestion: {suggestion}"
                    )

            if self.config['dida']['enable_alert']:
                if event_type == "trigger":
                    # Open a Dida task for the alert and remember its id on the doc.
                    res = self.dida.task_create(
                        project_id=self.config['dida']['alert_project_id'],
                        title=event_content,
                        content='\n'.join(content),
                        tags=['L-监控告警', priority]
                    )
                    dida_task_id = res.data.get("id")
                    self.mongo.collection.update_one(
                        {
                            "event_id": event_id
                        },
                        {
                            "$set": {
                                "dida_task_id": dida_task_id
                            }
                        }
                    )
                else:
                    # NOTE(review): relies on filter_doc bound in the 'resolved'
                    # branch above; if this ever runs for a trigger event it
                    # would raise NameError — confirm the control flow.
                    task_id = self.mongo.collection.find_one(filter_doc, {'dida_task_id': 1})['dida_task_id']
                    self.dida.task_update(
                        task_id=task_id,
                        project_id=self.config['dida']['alert_project_id'],
                        content=f'\n**恢复时间**: {TimeUtils.convert_timeobj_to_str(timeobj=event_time, timezone_offset=0)}'
                    )
                    self.dida.task_complete(task_id=task_id, project_id=self.config['dida']['alert_project_id'])


            # WeCom channel is not implemented yet.
            if self.config['wecom']['enable']:
                pass
pytbox/alert/ping.py ADDED
@@ -0,0 +1,24 @@
1
#!/usr/bin/env python3

from ..database.victoriametrics import VictoriaMetrics
# NOTE(review): the package ships pytbox/utils/load_config.py, not a 'lib'
# package — confirm this import path; it likely should be ..utils.load_config.
from ..lib.load_config import load_config


def ping(config, target):
    """Check recent ping status of *target* in VictoriaMetrics and alert if down.

    Args:
        config: settings dict; reads ``config['victoriametrics']['url']`` and
            the ``config['alert']['ping']`` section (last_minutes, event_name,
            priority).
        target: host/IP whose ping metrics are queried.
    """
    vm = VictoriaMetrics(url=config['victoriametrics']['url'])
    ping_status = vm.query_ping_status(target=target, last_minutes=config['alert']['ping']['last_minutes'])
    if ping_status == '不通':
        # NOTE(review): insert_alert is neither defined nor imported in this
        # module, so this branch raises NameError when the target is down —
        # it presumably should go through AlertHandler.send_alert or an
        # imported helper; fix the import before relying on this path.
        insert_alert(
            event_name=config['alert']['ping']['event_name'],
            event_content=f"{target} {config['alert']['ping']['event_name']}",
            entity_name=target,
            priority=config['alert']['ping']['priority'],
            resolved_query={
                "target": target
            }
        )


if __name__ == "__main__":
    config = load_config()
    ping(config, "10.30.35.38")
pytbox/alicloud/sls.py CHANGED
@@ -5,29 +5,23 @@ from typing import Literal
5
5
  from aliyun.log import GetLogsRequest, LogItem, PutLogsRequest
6
6
  from aliyun.log import LogClient as SlsLogClient
7
7
  from aliyun.log.auth import AUTH_VERSION_4
8
- from ..utils.env import check_env
9
8
 
10
9
 
11
10
 
12
11
  class AliCloudSls:
13
-
14
- def __init__(self, access_key_id: str=None, access_key_secret: str=None, project: str, logstore: str):
12
+ '''
13
+ pip install -U aliyun-log-python-sdk
14
+ '''
15
+ def __init__(self, access_key_id: str=None, access_key_secret: str=None, project: str=None, logstore: str=None, env: str='prod'):
15
16
  # 日志服务的服务接入点
16
17
  self.endpoint = "cn-shanghai.log.aliyuncs.com"
17
18
  # 创建 LogClient 实例,使用 V4 签名,根据实际情况填写 region,这里以杭州为例
18
19
  self.client = SlsLogClient(self.endpoint, access_key_id, access_key_secret, auth_version=AUTH_VERSION_4, region='cn-shanghai')
19
20
  self.project = project
20
21
  self.logstore = logstore
22
+ self.env = env
21
23
 
22
24
  def get_logs(self, project_name, logstore_name, query, from_time, to_time):
23
- # Project名称。
24
- # project_name = "sh-prod-network-devices-log"
25
- # Logstore名称
26
- # logstore_name = "sh-prod-network-devices-log"
27
- # 查询语句。
28
- # query = "*| select dev,id from " + logstore_name
29
- # query = "*"
30
- # 索引。
31
25
  logstore_index = {'line': {
32
26
  'token': [',', ' ', "'", '"', ';', '=', '(', ')', '[', ']', '{', '}', '?', '@', '&', '<', '>', '/', ':', '\n', '\t',
33
27
  '\r'], 'caseSensitive': False, 'chn': False}, 'keys': {'dev': {'type': 'text',
@@ -65,7 +59,7 @@ class AliCloudSls:
65
59
  log_group = []
66
60
  log_item = LogItem()
67
61
  contents = [
68
- ('env', check_env()),
62
+ ('env', self.env),
69
63
  ('level', level),
70
64
  ('app', app),
71
65
  ('msg', msg),
@@ -77,8 +71,9 @@ class AliCloudSls:
77
71
  log_item.set_contents(contents)
78
72
  log_group.append(log_item)
79
73
  request = PutLogsRequest(self.project, self.logstore, topic, "", log_group, compress=False)
80
- self.client.put_logs(request)
81
-
74
+ r = self.client.put_logs(request)
75
+ return r
76
+
82
77
  def put_logs_for_meraki(self, alert):
83
78
  log_group = []
84
79
  log_item = LogItem()
pytbox/base.py ADDED
@@ -0,0 +1,121 @@
1
#!/usr/bin/env python3
"""Shared, config-driven client singletons for pytbox scripts.

Importing this module loads the TOML config and eagerly constructs the
commonly used clients (VictoriaMetrics, Feishu, Dida365, Meraki, ...).
"""

import os
from pytbox.database.mongo import Mongo
from pytbox.utils.load_config import load_config_by_file
from pytbox.database.victoriametrics import VictoriaMetrics
from pytbox.feishu.client import Client as FeishuClient
from pytbox.dida365 import Dida365
from pytbox.alert.alert_handler import AlertHandler
from pytbox.log.logger import AppLogger
from pytbox.network.meraki import Meraki
from pytbox.utils.env import get_env_by_os_environment
from pytbox.vmware import VMwareClient
from pytbox.pyjira import PyJira
from pytbox.mail.client import MailClient
from pytbox.mail.alimail import AliMail
from pytbox.alicloud.sls import AliCloudSls
from pytbox.utils.cronjob import cronjob_counter
from pytbox.notion import Notion
from pytbox.mingdao import Mingdao


# Config location is overridable via PYTBOX_CONFIG_PATH; the default keeps the
# previous hard-coded dev-container path so existing usage is unchanged.
_CONFIG_PATH = os.environ.get(
    'PYTBOX_CONFIG_PATH',
    '/workspaces/pytbox/tests/alert/config_dev.toml',
)
config = load_config_by_file(path=_CONFIG_PATH, oc_vault_id=os.environ.get('oc_vault_id'))


def get_mongo(collection):
    """Return a Mongo wrapper bound to *collection*, configured from ``config``."""
    return Mongo(
        host=config['mongo']['host'],
        port=config['mongo']['port'],
        username=config['mongo']['username'],
        password=config['mongo']['password'],
        # NOTE(review): 'auto_source' looks like a typo for 'auth_source' —
        # confirm against the Mongo wrapper's signature and the config keys.
        auto_source=config['mongo']['auto_source'],
        db_name=config['mongo']['db_name'],
        collection=collection
    )

vm = VictoriaMetrics(url=config['victoriametrics']['url'])

feishu = FeishuClient(
    app_id=config['feishu']['app_id'],
    app_secret=config['feishu']['app_secret']
)
dida = Dida365(
    cookie=config['dida']['cookie'],
    access_token=config['dida']['access_token']
)


alert_handler = AlertHandler(config=config, mongo_client=get_mongo('alert_test'), feishu_client=feishu, dida_client=dida)

def get_logger(app):
    """Logger for *app* that ships records to VictoriaLogs."""
    return AppLogger(
        app_name=app,
        enable_victorialog=True,
        victorialog_url=config['victorialog']['url'],
        feishu=feishu,
        dida=dida,
        mongo=get_mongo('alert_program')
    )

def get_logger_sls(app):
    """Logger for *app* that ships records to Alibaba Cloud SLS."""
    return AppLogger(
        app_name=app,
        enable_sls=True,
        feishu=feishu,
        dida=dida,
        mongo=get_mongo('alert_program'),
        sls_access_key_id=config['alicloud']['account1']['access_key_id'],
        sls_access_key_secret=config['alicloud']['account1']['access_key_secret'],
        sls_project=config['alicloud']['account1']['project'],
        sls_logstore=config['alicloud']['account1']['logstore']
    )

# Optional AD clients — enable by uncommenting and filling the config sections.
# ad_dev = ADClient(
#     server=config['ad']['dev']['AD_SERVER'],
#     base_dn=config['ad']['dev']['BASE_DN'],
#     username=config['ad']['dev']['AD_USERNAME'],
#     password=config['ad']['dev']['AD_PASSWORD']
# )

# ad_prod = ADClient(
#     server=config['ad']['prod']['AD_SERVER'],
#     base_dn=config['ad']['prod']['BASE_DN'],
#     username=config['ad']['prod']['AD_USERNAME'],
#     password=config['ad']['prod']['AD_PASSWORD']
# )

env = get_env_by_os_environment(check_key='ENV')
meraki = Meraki(api_key=config['meraki']['api_key'], organization_id=config['meraki']['organization_id'])

vmware_test = VMwareClient(
    host=config['vmware']['test']['host'],
    username=config['vmware']['test']['username'],
    password=config['vmware']['test']['password'],
    version=config['vmware']['test']['version'],
    proxies=config['vmware']['test']['proxies']
)

pyjira = PyJira(
    base_url=config['jira']['base_url'],
    username=config['jira']['username'],
    token=config['jira']['token']
)

# Optional mail clients — enable by uncommenting and filling the config sections.
# mail_163 = MailClient(mail_address=config['mail']['163']['mail_address'], password=config['mail']['163']['password'])
# mail_qq = MailClient(mail_address=config['mail']['qq']['mail_address'], password=config['mail']['qq']['password'])
# ali_mail = AliMail(mail_address=config['mail']['aliyun']['mail_address'], client_id=config['mail']['aliyun']['client_id'], client_secret=config['mail']['aliyun']['client_secret'])

sls = AliCloudSls(
    access_key_id=config['alicloud']['account1']['access_key_id'],
    access_key_secret=config['alicloud']['account1']['access_key_secret'],
    project=config['alicloud']['account1']['project'],
    logstore=config['alicloud']['account1']['logstore']
)

def get_cronjob_counter(app_type='', app='', comment=None, schedule_interval=None, schedule_cron=None):
    """Wrap ``cronjob_counter`` with the shared VictoriaMetrics client and logger."""
    return cronjob_counter(vm=vm, log=get_logger('cronjob_counter'), app_type=app_type, app=app, comment=comment, schedule_interval=schedule_interval, schedule_cron=schedule_cron)


notion = Notion(token=config['notion']['api_secrets'], proxy=config['notion']['proxy'])
mingdao = Mingdao(app_key=config['mingdao']['app_key'], sign=config['mingdao']['sign'])
@@ -0,0 +1,143 @@
1
#!/usr/bin/env python3

import os
from pathlib import Path
from pytbox.utils.load_config import load_config_by_file
from glob import glob
from jinja2 import Environment, FileSystemLoader


# Templates live next to this module under ./jinja2/input.<name>/<name>.toml.j2.
jinja2_path = Path(__file__).parent / 'jinja2'
env = Environment(loader=FileSystemLoader(jinja2_path))

# Pre-loaded templates kept as module-level names for backward compatibility.
ping_template = env.get_template('input.ping/ping.toml.j2')
prometheus_template = env.get_template('input.prometheus/prometheus.toml.j2')


class BuildConfig:
    '''
    Render categraf input config files (TOML) from the bundled Jinja2 templates.

    Args:
        instances: path to a TOML file describing monitored instances; parsed
            with ``load_config_by_file``.
        output_dir: directory receiving the generated ``input.*`` folders.
    '''
    def __init__(self, instances, output_dir):
        self.instances = load_config_by_file(instances)
        self.output_dir = output_dir

    def _get_template(self, template_name):
        # Template names are relative to the jinja2/ directory.
        return env.get_template(template_name)

    def _write_output(self, input_name, file_name, render_data):
        # Ensure <output_dir>/input.<input_name>/ exists, then write the TOML.
        target_dir = Path(self.output_dir) / f'input.{input_name}'
        target_dir.mkdir(parents=True, exist_ok=True)
        with open(target_dir / file_name, 'w', encoding='utf-8') as f:
            f.write(render_data)

    def _render_instances(self, input_name, items_key):
        # Shared path for the simple per-input sections: silently skip when the
        # section is absent, otherwise render its item list into one file.
        section = self.instances.get(input_name)
        if not section:
            return
        template = self._get_template(f'input.{input_name}/{input_name}.toml.j2')
        self._write_output(input_name, f'{input_name}.toml',
                          template.render(instances=section[items_key]))

    def common(self, input_name):
        """Render a static input (cpu/mem/net/...) that needs no instance data."""
        template = self._get_template(f'input.{input_name}/{input_name}.toml.j2')
        self._write_output(input_name, f'{input_name}.toml', template.render())

    def ping(self):
        """Render input.ping from the [ping.instance] entries, if configured."""
        self._render_instances('ping', 'instance')

    def prometheus(self):
        """Render input.prometheus from the [prometheus.urls] entries."""
        self._render_instances('prometheus', 'urls')

    def vsphere(self):
        """Render input.vsphere from the [vsphere.instance] entries."""
        self._render_instances('vsphere', 'instance')

    def http_response(self):
        """Render input.http_response from the [http_response.instance] entries."""
        self._render_instances('http_response', 'instance')

    def net_response(self):
        """Render input.net_response from the [net_response.instance] entries."""
        self._render_instances('net_response', 'instance')

    def dns_query(self):
        """Render input.dns_query from the [dns_query.instance] entries."""
        self._render_instances('dns_query', 'instance')

    def snmp(self):
        """Render input.snmp, one file per <device_type>_*.toml.j2 template.

        Device types (h3c/huawei/cisco/ruijie) come from
        ``[snmp.instances.<type>]``; each matching template is rendered with
        that type's instances plus the shared ``[snmp.config]`` section.
        """
        if not self.instances.get('snmp'):
            return
        device_types = self.instances['snmp']['instances']
        for device_type in device_types:
            instances = device_types[device_type]
            jinja2_dir = Path(jinja2_path) / 'input.snmp'
            device_templates = glob(str(jinja2_dir / f'{device_type}_*.toml.j2'))
            if not device_templates:
                # No template shipped for this device type — nothing to render.
                continue
            for tmpl_path in device_templates:
                tmpl_name = os.path.basename(tmpl_path)
                base_name = tmpl_name.replace('.toml.j2', '')
                template = self._get_template(f'input.snmp/{tmpl_name}')
                render_data = template.render(instances=instances, config=self.instances['snmp']['config'])
                self._write_output('snmp', f'{base_name}.toml', render_data)

    def run(self):
        """Generate every supported input config into ``output_dir``."""
        self.common('cpu')
        self.common('mem')
        self.common('net')
        self.common('disk')
        self.common('diskio')
        self.vsphere()
        self.ping()
        self.prometheus()
        self.http_response()
        self.net_response()
        self.dns_query()
        self.snmp()
@@ -0,0 +1,39 @@
1
+ [ping]
2
+ [[ping.instance]]
3
+ "10.1.1.1" = { name = "x", env = "prod" }
4
+ "10.1.1.2" = { name = "demo02", env = "dev" }
5
+
6
+ [prometheus]
7
+ [[prometheus.urls]]
8
+ "http://10.1.1.1:9100" = { name = "x", env = "prod" }
9
+
10
+ [vsphere]
11
+ [[vsphere.instance]]
12
+ "test" = { vcenter = "https://10.1.1.1/sdk", username = "categraf@vsphere.local", password = "xxx" }
13
+
14
+ [http_response]
15
+ [[http_response.instance]]
16
+ "https://www.baidu.com" = { name = "x", env = "prod" }
17
+
18
+
19
+ [net_response]
20
+ [[net_response.instance]]
21
+ "124.74.245.90:8443" = { name = "x", env = "prod" }
22
+
23
+ [dns_query]
24
+ [[dns_query.instance]]
25
+ "119.29.29.29_baidu.com" = { dns_server = "119.29.29.29", domains = "www.baidu.com", labels = { name = "x", env = "prod" } }
26
+
27
+ [snmp]
28
+ [snmp.config]
29
+ timeout = '120s'
30
+ path = "/usr/share/snmp/mibs"
31
+ # 支持 h3c,huawei,cisco,ruijie
32
+ [[snmp.instances.h3c]]
33
+ "10.1.1.1:161" = { version = 2, community = "public" }
34
+ "10.1.1.2:161" = { version = 2, community = "public" }
35
+ "10.1.1.3:161" = { version = 3, sec_name = "sec", auth_protocol = "SHA", auth_password = "pass01", priv_protocol = "AES", priv_password = "pass02", sec_level = "authPriv" }
36
+
37
+ # [[snmp.instances.huawei]]
38
+ # "udp://172.16.1.1:161" = { version = 2, community = "public"}
39
+ # "udp://172.16.1.2:161" = { version = 2, community = "public"}
@@ -0,0 +1,6 @@
1
+ """
2
+ Categraf Jinja2 模板文件包
3
+ """
4
+
5
+ # 这个文件确保 jinja2 目录被 Python 识别为包
6
+ # 这样模板文件就会被包含在安装包中
@@ -0,0 +1,5 @@
1
+ # # collect interval
2
+ # interval = 15
3
+
4
+ # # whether collect per cpu
5
+ # collect_per_cpu = false
@@ -0,0 +1,11 @@
1
+ # # collect interval
2
+ # interval = 15
3
+
4
+ # # By default stats will be gathered for all mount points.
5
+ # # Set mount_points will restrict the stats to only the specified mount points.
6
+ # mount_points = ["/"]
7
+
8
+ # Ignore mount points by filesystem type.
9
+ ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs", "nsfs", "CDFS", "fuse.juicefs"]
10
+
11
+ ignore_mount_points = ["/boot", "/var/lib/kubelet/pods"]
@@ -0,0 +1,6 @@
1
+ # # collect interval
2
+ # interval = 15
3
+
4
+ # # By default, categraf will gather stats for all devices including disk partitions.
5
+ # # Setting devices will restrict the stats to the specified devices.
6
+ # devices = ["sda", "sdb", "vd*"]
@@ -0,0 +1,12 @@
1
+ {% for instance in instances %}
2
+ {% for name, data in instance.items() %}
3
+ [[instances]]
4
+ # append some labels for series
5
+ labels = { {% for k, v in data['labels'].items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
6
+ servers = ["{{ data['dns_server'] }}"]
7
+ ## Domains or subdomains to query.
8
+ domains = ["{{ data['domains'] }}"]
9
+ record_type = "A"
10
+ timeout = 2
11
+ {% endfor %}
12
+ {% endfor %}
@@ -0,0 +1,9 @@
1
+ {% for instance in instances %}
2
+ {% for target, labels in instance.items() %}
3
+ [[instances]]
4
+ targets = [
5
+ "{{ target }}"
6
+ ]
7
+ labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
8
+ {% endfor %}
9
+ {% endfor %}
@@ -0,0 +1,5 @@
1
+ # # collect interval
2
+ # interval = 15
3
+
4
+ # # whether collect platform specified metrics
5
+ collect_platform_fields = true
@@ -0,0 +1,11 @@
1
+ # # collect interval
2
+ # interval = 15
3
+
4
+ # # whether collect protocol stats on Linux
5
+ # collect_protocol_stats = false
6
+
7
+ # # setting interfaces will tell categraf to gather these explicit interfaces
8
+ # interfaces = ["eth0"]
9
+
10
+ # enable_loopback_stats=true
11
+ # enable_link_down_stats=true
@@ -0,0 +1,9 @@
1
+ {% for instance in instances %}
2
+ {% for target, labels in instance.items() %}
3
+ [[instances]]
4
+ targets = [
5
+ "{{ target }}"
6
+ ]
7
+ labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
8
+ {% endfor %}
9
+ {% endfor %}
@@ -0,0 +1,11 @@
1
+ {% for instance in instances %}
2
+ {% for target, labels in instance.items() %}
3
+ [[instances]]
4
+ targets = [
5
+ "{{ target }}"
6
+ ]
7
+ method = "exec"
8
+ count = 3
9
+ labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
10
+ {% endfor %}
11
+ {% endfor %}
@@ -0,0 +1,12 @@
1
+ {% for instance in instances %}
2
+ {% for instance, labels in instance.items() %}
3
+ [[instances]]
4
+ urls = [
5
+ "{{ instance }}"
6
+ ]
7
+
8
+ url_label_key = "instance"
9
+ url_label_value = "{{ '{{.Host}}' }}"
10
+ labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
11
+ {% endfor %}
12
+ {% endfor %}