pytbox-0.0.7-py3-none-any.whl → pytbox-0.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (55)
  1. pytbox/alert/alert_handler.py +27 -7
  2. pytbox/alert/ping.py +0 -1
  3. pytbox/alicloud/sls.py +9 -6
  4. pytbox/base.py +74 -1
  5. pytbox/categraf/build_config.py +95 -32
  6. pytbox/categraf/instances.toml +39 -0
  7. pytbox/categraf/jinja2/__init__.py +6 -0
  8. pytbox/categraf/jinja2/input.cpu/cpu.toml.j2 +5 -0
  9. pytbox/categraf/jinja2/input.disk/disk.toml.j2 +11 -0
  10. pytbox/categraf/jinja2/input.diskio/diskio.toml.j2 +6 -0
  11. pytbox/categraf/jinja2/input.dns_query/dns_query.toml.j2 +12 -0
  12. pytbox/categraf/jinja2/input.http_response/http_response.toml.j2 +9 -0
  13. pytbox/categraf/jinja2/input.mem/mem.toml.j2 +5 -0
  14. pytbox/categraf/jinja2/input.net/net.toml.j2 +11 -0
  15. pytbox/categraf/jinja2/input.net_response/net_response.toml.j2 +9 -0
  16. pytbox/categraf/jinja2/input.ping/ping.toml.j2 +11 -0
  17. pytbox/categraf/jinja2/input.prometheus/prometheus.toml.j2 +12 -0
  18. pytbox/categraf/jinja2/input.snmp/cisco_interface.toml.j2 +96 -0
  19. pytbox/categraf/jinja2/input.snmp/cisco_system.toml.j2 +41 -0
  20. pytbox/categraf/jinja2/input.snmp/h3c_interface.toml.j2 +96 -0
  21. pytbox/categraf/jinja2/input.snmp/h3c_system.toml.j2 +41 -0
  22. pytbox/categraf/jinja2/input.snmp/huawei_interface.toml.j2 +96 -0
  23. pytbox/categraf/jinja2/input.snmp/huawei_system.toml.j2 +41 -0
  24. pytbox/categraf/jinja2/input.snmp/ruijie_interface.toml.j2 +96 -0
  25. pytbox/categraf/jinja2/input.snmp/ruijie_system.toml.j2 +41 -0
  26. pytbox/categraf/jinja2/input.vsphere/vsphere.toml.j2 +211 -0
  27. pytbox/cli/commands/vm.py +22 -0
  28. pytbox/cli/main.py +2 -0
  29. pytbox/database/mongo.py +1 -1
  30. pytbox/database/victoriametrics.py +331 -40
  31. pytbox/excel.py +64 -0
  32. pytbox/feishu/endpoints.py +6 -6
  33. pytbox/log/logger.py +29 -12
  34. pytbox/mail/alimail.py +142 -0
  35. pytbox/mail/client.py +171 -0
  36. pytbox/mail/mail_detail.py +30 -0
  37. pytbox/mingdao.py +164 -0
  38. pytbox/network/meraki.py +537 -0
  39. pytbox/notion.py +731 -0
  40. pytbox/pyjira.py +612 -0
  41. pytbox/utils/cronjob.py +79 -0
  42. pytbox/utils/env.py +2 -2
  43. pytbox/utils/load_config.py +67 -21
  44. pytbox/utils/load_vm_devfile.py +45 -0
  45. pytbox/utils/richutils.py +11 -1
  46. pytbox/utils/timeutils.py +15 -57
  47. pytbox/vmware.py +120 -0
  48. pytbox/win/ad.py +30 -0
  49. {pytbox-0.0.7.dist-info → pytbox-0.3.1.dist-info}/METADATA +7 -4
  50. pytbox-0.3.1.dist-info/RECORD +72 -0
  51. pytbox/utils/ping_checker.py +0 -1
  52. pytbox-0.0.7.dist-info/RECORD +0 -39
  53. {pytbox-0.0.7.dist-info → pytbox-0.3.1.dist-info}/WHEEL +0 -0
  54. {pytbox-0.0.7.dist-info → pytbox-0.3.1.dist-info}/entry_points.txt +0 -0
  55. {pytbox-0.0.7.dist-info → pytbox-0.3.1.dist-info}/top_level.txt +0 -0
pytbox/alert/alert_handler.py CHANGED
@@ -7,6 +7,7 @@ from ..database.mongo import Mongo
 from ..feishu.client import Client as FeishuClient
 from ..dida365 import Dida365
 from ..utils.timeutils import TimeUtils
+from ..mail.client import MailClient


 class AlertHandler:
@@ -15,12 +16,17 @@ class AlertHandler:
                  config: dict=None,
                  mongo_client: Mongo=None,
                  feishu_client: FeishuClient=None,
-                 dida_client: Dida365=None
+                 dida_client: Dida365=None,
+                 mail_client: MailClient=None,
+                 env: Literal['dev', 'prod']='prod'
                  ):
+
         self.config = config
         self.mongo = mongo_client
         self.feishu = feishu_client
         self.dida = dida_client
+        self.mail = mail_client
+        self.env = env

     def send_alert(self,
                    event_id: str=None,
@@ -34,7 +40,7 @@ class AlertHandler:
                    suggestion: str='',
                    troubleshot: str='暂无',
                    mongo_id: str=None
-                   ):
+                   ):

         if not event_id:
             event_id = str(uuid.uuid4())
@@ -62,11 +68,11 @@ class AlertHandler:
             update = {"$set": { "resolved_time": event_time}}
             self.mongo.collection.update_one(filter_doc, update)
             alarm_time = self.mongo.collection.find_one(filter_doc, {'event_time': 1})['event_time']
-
+
         content = [
             f'**事件名称**: {event_name}',
             f'**告警时间**: {TimeUtils.convert_timeobj_to_str(timeobj=event_time, timezone_offset=0) if event_type == "trigger" else TimeUtils.convert_timeobj_to_str(timeobj=alarm_time, timezone_offset=8)}',
-            f'**事件内容**: {event_content}',
+            f'**事件内容**: {event_content + " 已恢复" if event_type == "resolved" else event_content}',
             f'**告警实例**: {entity_name}',
             f'**建议**: {suggestion}',
             f'**故障排查**: {troubleshot}',
@@ -78,17 +84,31 @@ class AlertHandler:

         if self.config['feishu']['enable_alert']:
             self.feishu.extensions.send_message_notify(
+                receive_id=self.config['feishu']['receive_id'],
                 color='red' if event_type == "trigger" else 'green',
-                title=event_content,
+                title=event_content + " 已恢复" if event_type == "resolved" else event_content,
                 priority=priority,
-                sub_title="",
+                sub_title='测试告警, 无需处理' if self.env == 'dev' else '',
                 content='\n'.join(content)
             )

+        if self.config['mail']['enable_mail']:
+            if event_type == "trigger":
+                self.mail.send_mail(
+                    receiver=[self.config['mail']['mail_address']],
+                    subject=f"{self.config['mail']['subject_trigger']}, {event_content}",
+                    contents=f"event_content:{event_content}, alarm_time: {str(event_time)}, event_id: {event_id}, alarm_name: {event_name}, entity_name: {entity_name}, priority: {priority}, automate_ts: {troubleshot}, suggestion: {suggestion}"
+                )
+            else:
+                self.mail.send_mail(
+                    receiver=[self.config['mail']['mail_address']],
+                    subject=f"{self.config['mail']['subject_resolved']}, {event_content}",
+                    contents=f"event_content:{event_content}, alarm_time: {str(TimeUtils.get_now_time_mongo())}, event_id: {event_id}, alarm_name: {event_name}, entity_name: {entity_name}, priority: {priority}, automate_ts: {troubleshot}, suggestion: {suggestion}"
+                )
+
         if self.config['dida']['enable_alert']:
             if event_type == "trigger":
                 res = self.dida.task_create(
-
                     project_id=self.config['dida']['alert_project_id'],
                     title=event_content,
                     content='\n'.join(content),
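
For orientation, a minimal sketch of how the new mail_client / env parameters might be wired up. The constructor keywords and config keys come from the hunk above; the send_alert keyword names are inferred from the method body, and the config/mongo/feishu/dida objects are assumed to be built as in pytbox/base.py.

from pytbox.alert.alert_handler import AlertHandler
from pytbox.mail.client import MailClient

mail = MailClient(mail_address='ops@example.com', password='xxx')   # placeholder credentials

handler = AlertHandler(
    config=config,              # needs config['feishu'], config['mail'], config['dida'] sections
    mongo_client=mongo,
    feishu_client=feishu,
    dida_client=dida,
    mail_client=mail,
    env='dev',                  # 'dev' adds the "测试告警, 无需处理" sub_title to the Feishu card
)

handler.send_alert(
    event_name='disk usage high',        # keyword names inferred from the method body, not the full signature
    event_content='/dev/sda1 usage > 90%',
    entity_name='server-01',
    priority='P2',
    event_type='trigger',                # 'resolved' appends " 已恢复" and uses subject_resolved for mail
)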
pytbox/alert/ping.py CHANGED
@@ -1,6 +1,5 @@
 #!/usr/bin/env python3

-
 from ..database.victoriametrics import VictoriaMetrics
 from ..lib.load_config import load_config

pytbox/alicloud/sls.py CHANGED
@@ -5,19 +5,21 @@ from typing import Literal
 from aliyun.log import GetLogsRequest, LogItem, PutLogsRequest
 from aliyun.log import LogClient as SlsLogClient
 from aliyun.log.auth import AUTH_VERSION_4
-from ..utils.env import check_env



 class AliCloudSls:
-
-    def __init__(self, access_key_id: str=None, access_key_secret: str=None, project: str, logstore: str):
+    '''
+    pip install -U aliyun-log-python-sdk
+    '''
+    def __init__(self, access_key_id: str=None, access_key_secret: str=None, project: str=None, logstore: str=None, env: str='prod'):
         # 日志服务的服务接入点
         self.endpoint = "cn-shanghai.log.aliyuncs.com"
         # 创建 LogClient 实例,使用 V4 签名,根据实际情况填写 region,这里以杭州为例
         self.client = SlsLogClient(self.endpoint, access_key_id, access_key_secret, auth_version=AUTH_VERSION_4, region='cn-shanghai')
         self.project = project
         self.logstore = logstore
+        self.env = env

     def get_logs(self, project_name, logstore_name, query, from_time, to_time):
         logstore_index = {'line': {
@@ -57,7 +59,7 @@ class AliCloudSls:
         log_group = []
         log_item = LogItem()
         contents = [
-            ('env', check_env()),
+            ('env', self.env),
             ('level', level),
             ('app', app),
             ('msg', msg),
@@ -69,8 +71,9 @@ class AliCloudSls:
         log_item.set_contents(contents)
         log_group.append(log_item)
         request = PutLogsRequest(self.project, self.logstore, topic, "", log_group, compress=False)
-        self.client.put_logs(request)
-
+        r = self.client.put_logs(request)
+        return r
+
     def put_logs_for_meraki(self, alert):
         log_group = []
         log_item = LogItem()
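
A short sketch of the updated constructor; every keyword except the placeholder values appears in the hunk above, and put_logs now returns the SDK response instead of discarding it.

from pytbox.alicloud.sls import AliCloudSls

sls = AliCloudSls(
    access_key_id='LTAI...',        # placeholder credentials
    access_key_secret='...',
    project='my-project',
    logstore='my-logstore',
    env='dev',                      # written into the ('env', ...) log field instead of calling check_env()
)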
pytbox/base.py CHANGED
@@ -8,6 +8,16 @@ from pytbox.feishu.client import Client as FeishuClient
 from pytbox.dida365 import Dida365
 from pytbox.alert.alert_handler import AlertHandler
 from pytbox.log.logger import AppLogger
+from pytbox.network.meraki import Meraki
+from pytbox.utils.env import get_env_by_os_environment
+from pytbox.vmware import VMwareClient
+from pytbox.pyjira import PyJira
+from pytbox.mail.client import MailClient
+from pytbox.mail.alimail import AliMail
+from pytbox.alicloud.sls import AliCloudSls
+from pytbox.utils.cronjob import cronjob_counter
+from pytbox.notion import Notion
+from pytbox.mingdao import Mingdao


 config = load_config_by_file(path='/workspaces/pytbox/tests/alert/config_dev.toml', oc_vault_id=os.environ.get('oc_vault_id'))
@@ -35,6 +45,7 @@ dida = Dida365(
     access_token=config['dida']['access_token']
 )

+
 alert_handler = AlertHandler(config=config, mongo_client=get_mongo('alert_test'), feishu_client=feishu, dida_client=dida)

 def get_logger(app):
@@ -45,4 +56,66 @@ def get_logger(app):
         feishu=feishu,
         dida=dida,
         mongo=get_mongo('alert_program')
-    )
+    )
+
+def get_logger_sls(app):
+    return AppLogger(
+        app_name=app,
+        enable_sls=True,
+        feishu=feishu,
+        dida=dida,
+        mongo=get_mongo('alert_program'),
+        sls_access_key_id=config['alicloud']['account1']['access_key_id'],
+        sls_access_key_secret=config['alicloud']['account1']['access_key_secret'],
+        sls_project=config['alicloud']['account1']['project'],
+        sls_logstore=config['alicloud']['account1']['logstore']
+    )
+
+# ad_dev = ADClient(
+#     server=config['ad']['dev']['AD_SERVER'],
+#     base_dn=config['ad']['dev']['BASE_DN'],
+#     username=config['ad']['dev']['AD_USERNAME'],
+#     password=config['ad']['dev']['AD_PASSWORD']
+# )
+
+# ad_prod = ADClient(
+#     server=config['ad']['prod']['AD_SERVER'],
+#     base_dn=config['ad']['prod']['BASE_DN'],
+#     username=config['ad']['prod']['AD_USERNAME'],
+#     password=config['ad']['prod']['AD_PASSWORD']
+# )
+
+env = get_env_by_os_environment(check_key='ENV')
+meraki = Meraki(api_key=config['meraki']['api_key'], organization_id=config['meraki']['organization_id'])
+
+vmware_test = VMwareClient(
+    host=config['vmware']['test']['host'],
+    username=config['vmware']['test']['username'],
+    password=config['vmware']['test']['password'],
+    version=config['vmware']['test']['version'],
+    proxies=config['vmware']['test']['proxies']
+)
+
+pyjira = PyJira(
+    base_url=config['jira']['base_url'],
+    username=config['jira']['username'],
+    token=config['jira']['token']
+)
+
+# mail_163 = MailClient(mail_address=config['mail']['163']['mail_address'], password=config['mail']['163']['password'])
+# mail_qq = MailClient(mail_address=config['mail']['qq']['mail_address'], password=config['mail']['qq']['password'])
+# ali_mail = AliMail(mail_address=config['mail']['aliyun']['mail_address'], client_id=config['mail']['aliyun']['client_id'], client_secret=config['mail']['aliyun']['client_secret'])
+
+sls = AliCloudSls(
+    access_key_id=config['alicloud']['account1']['access_key_id'],
+    access_key_secret=config['alicloud']['account1']['access_key_secret'],
+    project=config['alicloud']['account1']['project'],
+    logstore=config['alicloud']['account1']['logstore']
+)
+
+def get_cronjob_counter(app_type='', app='', comment=None, schedule_interval=None, schedule_cron=None):
+    return cronjob_counter(vm=vm, log=get_logger('cronjob_counter'), app_type=app_type, app=app, comment=comment, schedule_interval=schedule_interval, schedule_cron=schedule_cron)
+
+
+notion = Notion(token=config['notion']['api_secrets'], proxy=config['notion']['proxy'])
+mingdao = Mingdao(app_key=config['mingdao']['app_key'], sign=config['mingdao']['sign'])
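
A small sketch of the new module-level helpers; get_logger_sls and get_cronjob_counter are defined in the hunk above, while the logging call and the module-level vm client that cronjob_counter receives are assumptions (vm is not defined anywhere in these hunks).

from pytbox.base import get_logger_sls, get_cronjob_counter

log = get_logger_sls('my_app')            # AppLogger with enable_sls=True, shipping to the account1 logstore
log.info('collector started')             # assumed AppLogger method, not shown in this diff

counter = get_cronjob_counter(
    app_type='collector',
    app='my_app',
    schedule_interval=300,                # interpretation of the schedule_* arguments is an assumption
)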
pytbox/categraf/build_config.py CHANGED
@@ -3,7 +3,8 @@
 import os
 from pathlib import Path
 from pytbox.utils.load_config import load_config_by_file
-
+from glob import glob
+import os
 from jinja2 import Environment, FileSystemLoader


@@ -40,41 +41,103 @@ class BuildConfig:
             f.write(render_data)

     def ping(self):
-        instances = self.instances['ping']['instance']
-        render_data = ping_template.render(instances=instances)
-        target_dir = Path(self.output_dir) / 'input.ping'
-        if not target_dir.exists():
-            target_dir.mkdir(parents=True, exist_ok=True)
-
-        with open(Path(self.output_dir) / 'input.ping' / 'ping.toml', 'w', encoding='utf-8') as f:
-            f.write(render_data)
+        if self.instances.get('ping'):
+            instances = self.instances['ping']['instance']
+            render_data = ping_template.render(instances=instances)
+            target_dir = Path(self.output_dir) / 'input.ping'
+            if not target_dir.exists():
+                target_dir.mkdir(parents=True, exist_ok=True)
+
+            with open(Path(self.output_dir) / 'input.ping' / 'ping.toml', 'w', encoding='utf-8') as f:
+                f.write(render_data)

     def prometheus(self):
-        instances = self.instances['prometheus']['urls']
-        render_data = prometheus_template.render(instances=instances)
-        target_dir = Path(self.output_dir) / 'input.prometheus'
-        if not target_dir.exists():
-            target_dir.mkdir(parents=True, exist_ok=True)
-        with open(Path(self.output_dir) / 'input.prometheus' / 'prometheus.toml', 'w', encoding='utf-8') as f:
-            f.write(render_data)
+        if self.instances.get('prometheus'):
+            instances = self.instances['prometheus']['urls']
+            render_data = prometheus_template.render(instances=instances)
+            target_dir = Path(self.output_dir) / 'input.prometheus'
+            if not target_dir.exists():
+                target_dir.mkdir(parents=True, exist_ok=True)
+            with open(Path(self.output_dir) / 'input.prometheus' / 'prometheus.toml', 'w', encoding='utf-8') as f:
+                f.write(render_data)

     def vsphere(self):
-        template = self._get_template('input.vsphere/vsphere.toml.j2')
-        instances = self.instances['vsphere']['instance']
-        print(instances)
-        render_data = template.render(instances=instances)
-        target_dir = Path(self.output_dir) / 'input.vsphere'
-        if not target_dir.exists():
-            target_dir.mkdir(parents=True, exist_ok=True)
-        with open(Path(self.output_dir) / 'input.vsphere' / 'vsphere.toml', 'w', encoding='utf-8') as f:
-            f.write(render_data)
+        if self.instances.get('vsphere'):
+            template = self._get_template('input.vsphere/vsphere.toml.j2')
+            instances = self.instances['vsphere']['instance']
+            render_data = template.render(instances=instances)
+            target_dir = Path(self.output_dir) / 'input.vsphere'
+            if not target_dir.exists():
+                target_dir.mkdir(parents=True, exist_ok=True)
+            with open(Path(self.output_dir) / 'input.vsphere' / 'vsphere.toml', 'w', encoding='utf-8') as f:
+                f.write(render_data)
+
+    def http_response(self):
+        template = self._get_template('input.http_response/http_response.toml.j2')
+        if self.instances.get('http_response'):
+            instances = self.instances['http_response']['instance']
+            render_data = template.render(instances=instances)
+            target_dir = Path(self.output_dir) / 'input.http_response'
+            if not target_dir.exists():
+                target_dir.mkdir(parents=True, exist_ok=True)
+            with open(Path(self.output_dir) / 'input.http_response' / 'http_response.toml', 'w', encoding='utf-8') as f:
+                f.write(render_data)
+
+    def net_response(self):
+        template = self._get_template('input.net_response/net_response.toml.j2')
+        if self.instances.get('net_response'):
+            instances = self.instances['net_response']['instance']
+            render_data = template.render(instances=instances)
+            target_dir = Path(self.output_dir) / 'input.net_response'
+            if not target_dir.exists():
+                target_dir.mkdir(parents=True, exist_ok=True)
+            with open(Path(self.output_dir) / 'input.net_response' / 'net_response.toml', 'w', encoding='utf-8') as f:
+                f.write(render_data)
+
+    def dns_query(self):
+        template = self._get_template('input.dns_query/dns_query.toml.j2')
+        if self.instances.get('dns_query'):
+            instances = self.instances['dns_query']['instance']
+            render_data = template.render(instances=instances)
+            target_dir = Path(self.output_dir) / 'input.dns_query'
+            if not target_dir.exists():
+                target_dir.mkdir(parents=True, exist_ok=True)
+            with open(Path(self.output_dir) / 'input.dns_query' / 'dns_query.toml', 'w', encoding='utf-8') as f:
+                f.write(render_data)
+
+    def snmp(self):
+        if self.instances.get('snmp'):
+            device_types = self.instances['snmp']['instances']
+            for device_type in device_types:
+                instances = self.instances['snmp']['instances'][device_type]
+                jinja2_dir = Path(jinja2_path) / 'input.snmp'
+                device_templates = glob(str(jinja2_dir / f'{device_type}_*.toml.j2'))
+                if not device_templates:
+                    continue
+                for tmpl_path in device_templates:
+                    tmpl_name = os.path.basename(tmpl_path)
+                    base_name = tmpl_name.replace('.toml.j2', '')
+                    template = self._get_template(f'input.snmp/{tmpl_name}')
+                    render_data = template.render(instances=instances, config=self.instances['snmp']['config'])
+
+                    target_dir = Path(self.output_dir) / 'input.snmp'
+                    if not target_dir.exists():
+                        target_dir.mkdir(parents=True, exist_ok=True)
+
+                    output_file = target_dir / f'{base_name}.toml'
+                    with open(output_file, 'w', encoding='utf-8') as f:
+                        f.write(render_data)

     def run(self):
-        # self.common('cpu')
-        # self.common('mem')
-        # self.common('net')
-        # self.common('disk')
-        # self.common('diskio')
+        self.common('cpu')
+        self.common('mem')
+        self.common('net')
+        self.common('disk')
+        self.common('diskio')
         self.vsphere()
-        # self.ping()
-        # self.prometheus()
+        self.ping()
+        self.prometheus()
+        self.http_response()
+        self.net_response()
+        self.dns_query()
+        self.snmp()
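
The new snmp() method selects templates by filename prefix, so one [[snmp.instances.<device_type>]] table renders every input.snmp/<device_type>_*.toml.j2 template. A standalone sketch of that lookup (the jinja2_path value is an assumption):

from glob import glob
from pathlib import Path

jinja2_path = Path('pytbox/categraf/jinja2')              # assumed template location
for tmpl in glob(str(jinja2_path / 'input.snmp' / 'h3c_*.toml.j2')):
    base_name = Path(tmpl).name.replace('.toml.j2', '')
    print(f'{base_name}.toml')                            # e.g. h3c_interface.toml, h3c_system.toml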
pytbox/categraf/instances.toml ADDED
@@ -0,0 +1,39 @@
+[ping]
+[[ping.instance]]
+"10.1.1.1" = { name = "x", env = "prod" }
+"10.1.1.2" = { name = "demo02", env = "dev" }
+
+[prometheus]
+[[prometheus.urls]]
+"http://10.1.1.1:9100" = { name = "x", env = "prod" }
+
+[vsphere]
+[[vsphere.instance]]
+"test" = { vcenter = "https://10.1.1.1/sdk", username = "categraf@vsphere.local", password = "xxx" }
+
+[http_response]
+[[http_response.instance]]
+"https://www.baidu.com" = { name = "x", env = "prod" }
+
+
+[net_response]
+[[net_response.instance]]
+"124.74.245.90:8443" = { name = "x", env = "prod" }
+
+[dns_query]
+[[dns_query.instance]]
+"119.29.29.29_baidu.com" = { dns_server = "119.29.29.29", domains = "www.baidu.com", labels = { name = "x", env = "prod" } }
+
+[snmp]
+[snmp.config]
+timeout = '120s'
+path = "/usr/share/snmp/mibs"
+# 支持 h3c,huawei,cisco,ruijie
+[[snmp.instances.h3c]]
+"10.1.1.1:161" = { version = 2, community = "public" }
+"10.1.1.2:161" = { version = 2, community = "public" }
+"10.1.1.3:161" = { version = 3, sec_name = "sec", auth_protocol = "SHA", auth_password = "pass01", priv_protocol = "AES", priv_password = "pass02", sec_level = "authPriv" }
+
+# [[snmp.instances.huawei]]
+# "udp://172.16.1.1:161" = { version = 2, community = "public"}
+# "udp://172.16.1.2:161" = { version = 2, community = "public"}
pytbox/categraf/jinja2/__init__.py ADDED
@@ -0,0 +1,6 @@
+"""
+Categraf Jinja2 模板文件包
+"""
+
+# 这个文件确保 jinja2 目录被 Python 识别为包
+# 这样模板文件就会被包含在安装包中
pytbox/categraf/jinja2/input.cpu/cpu.toml.j2 ADDED
@@ -0,0 +1,5 @@
+# # collect interval
+# interval = 15
+
+# # whether collect per cpu
+# collect_per_cpu = false
pytbox/categraf/jinja2/input.disk/disk.toml.j2 ADDED
@@ -0,0 +1,11 @@
+# # collect interval
+# interval = 15
+
+# # By default stats will be gathered for all mount points.
+# # Set mount_points will restrict the stats to only the specified mount points.
+# mount_points = ["/"]
+
+# Ignore mount points by filesystem type.
+ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs", "nsfs", "CDFS", "fuse.juicefs"]
+
+ignore_mount_points = ["/boot", "/var/lib/kubelet/pods"]
pytbox/categraf/jinja2/input.diskio/diskio.toml.j2 ADDED
@@ -0,0 +1,6 @@
+# # collect interval
+# interval = 15
+
+# # By default, categraf will gather stats for all devices including disk partitions.
+# # Setting devices will restrict the stats to the specified devices.
+# devices = ["sda", "sdb", "vd*"]
pytbox/categraf/jinja2/input.dns_query/dns_query.toml.j2 ADDED
@@ -0,0 +1,12 @@
+{% for instance in instances %}
+{% for name, data in instance.items() %}
+[[instances]]
+# append some labels for series
+labels = { {% for k, v in data['labels'].items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
+servers = ["{{ data['dns_server'] }}"]
+## Domains or subdomains to query.
+domains = ["{{ data['domains'] }}"]
+record_type = "A"
+timeout = 2
+{% endfor %}
+{% endfor %}
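
The labels line quotes a key only when it is not a valid bare key (via k.isidentifier()). A quick check of that behaviour with a stripped-down template (the label set here is made up):

from jinja2 import Template

labels_tmpl = Template(
    'labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}'
    '{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }'
)
print(labels_tmpl.render(labels={'name': 'x', 'env': 'prod', 'rack-id': 'A1'}))
# labels = { name = "x", env = "prod", "rack-id" = "A1" }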
pytbox/categraf/jinja2/input.http_response/http_response.toml.j2 ADDED
@@ -0,0 +1,9 @@
+{% for instance in instances %}
+{% for target, labels in instance.items() %}
+[[instances]]
+targets = [
+    "{{ target }}"
+]
+labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
+{% endfor %}
+{% endfor %}
pytbox/categraf/jinja2/input.mem/mem.toml.j2 ADDED
@@ -0,0 +1,5 @@
+# # collect interval
+# interval = 15
+
+# # whether collect platform specified metrics
+collect_platform_fields = true
pytbox/categraf/jinja2/input.net/net.toml.j2 ADDED
@@ -0,0 +1,11 @@
+# # collect interval
+# interval = 15
+
+# # whether collect protocol stats on Linux
+# collect_protocol_stats = false
+
+# # setting interfaces will tell categraf to gather these explicit interfaces
+# interfaces = ["eth0"]
+
+# enable_loopback_stats=true
+# enable_link_down_stats=true
pytbox/categraf/jinja2/input.net_response/net_response.toml.j2 ADDED
@@ -0,0 +1,9 @@
+{% for instance in instances %}
+{% for target, labels in instance.items() %}
+[[instances]]
+targets = [
+    "{{ target }}"
+]
+labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
+{% endfor %}
+{% endfor %}
pytbox/categraf/jinja2/input.ping/ping.toml.j2 ADDED
@@ -0,0 +1,11 @@
+{% for instance in instances %}
+{% for target, labels in instance.items() %}
+[[instances]]
+targets = [
+    "{{ target }}"
+]
+method = "exec"
+count = 3
+labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
+{% endfor %}
+{% endfor %}
pytbox/categraf/jinja2/input.prometheus/prometheus.toml.j2 ADDED
@@ -0,0 +1,12 @@
+{% for instance in instances %}
+{% for instance, labels in instance.items() %}
+[[instances]]
+urls = [
+    "{{ instance }}"
+]
+
+url_label_key = "instance"
+url_label_value = "{{ '{{.Host}}' }}"
+labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
+{% endfor %}
+{% endfor %}
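
The url_label_value line has to emit the literal {{.Host}} placeholder into the generated TOML (presumably expanded by categraf itself at scrape time), so the template wraps it in a Jinja2 string literal. A quick check:

from jinja2 import Template

print(Template("url_label_value = \"{{ '{{.Host}}' }}\"").render())
# url_label_value = "{{.Host}}"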
pytbox/categraf/jinja2/input.snmp/cisco_interface.toml.j2 ADDED
@@ -0,0 +1,96 @@
+{% for instance in instances %}
+{% for agent, detail in instance.items() %}
+[[instances]]
+agents = [
+    "udp://{{agent}}"
+]
+{% if detail.version == 2 %}
+version = {{detail.version}}
+community = "{{detail.community}}"
+{% endif %}
+{% if detail.version == 3 %}
+version = {{detail.version}}
+sec_name = "{{detail.sec_name}}"
+auth_protocol = "{{detail.auth_protocol}}"
+auth_password = "{{detail.auth_password}}"
+priv_protocol = "{{detail.priv_protocol}}"
+priv_password = "{{detail.priv_password}}"
+sec_level = "{{detail.sec_level}}"
+{% endif %}
+timeout = "{{config.timeout}}"
+retries = 3
+path = ["{{config.path}}"]
+translator = "gosmi"
+max_repetitions = 50
+
+[[instances.field]]
+name = "sysName"
+oid = "1.3.6.1.2.1.1.5.0"
+is_tag = true
+
+[[instances.table]]
+name = "interface"
+inherit_tags = ["sysName"]
+index_as_tag = true
+
+[[instances.table.field]]
+name = "ifIndex"
+oid = "1.3.6.1.2.1.2.2.1.1"
+is_tag = true
+
+[[instances.table.field]]
+name = "ifName"
+oid = "1.3.6.1.2.1.31.1.1.1.1"
+is_tag = true
+
+[[instances.table.field]]
+name = "ifDescr"
+oid = "1.3.6.1.2.1.2.2.1.2"
+is_tag = true
+
+[[instances.table.field]]
+name = "ifSpeed"
+oid = "1.3.6.1.2.1.2.2.1.5"
+# is_tag = true
+
+[[instances.table.field]]
+name = "ifAdminStatus"
+oid = "1.3.6.1.2.1.2.2.1.7"
+# is_tag = true
+
+[[instances.table.field]]
+name = "ifOperStatus"
+oid = "1.3.6.1.2.1.2.2.1.8"
+# is_tag = true
+
+[[instances.table.field]]
+name = "ifInDiscards"
+oid = "1.3.6.1.2.1.2.2.1.13"
+
+[[instances.table.field]]
+name = "ifInErrors"
+oid = "1.3.6.1.2.1.2.2.1.14"
+
+[[instances.table.field]]
+name = "ifOutDiscards"
+oid = "1.3.6.1.2.1.2.2.1.19"
+
+[[instances.table.field]]
+name = "ifOutErrors"
+oid = "1.3.6.1.2.1.2.2.1.20"
+
+[[instances.table.field]]
+name = "ifAlias"
+oid = "1.3.6.1.2.1.31.1.1.1.18"
+is_tag = true
+
+[[instances.table.field]]
+name = "ifHCInOctets"
+oid = "1.3.6.1.2.1.31.1.1.1.6"
+
+[[instances.table.field]]
+name = "ifHCOutOctets"
+oid = "1.3.6.1.2.1.31.1.1.1.10"
+
+{% endfor %}
+{% endfor %}
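
To close the loop with BuildConfig.snmp(), a sketch of rendering this template directly; the loader path is an assumption, while the instances/config shapes mirror instances.toml above:

from jinja2 import Environment, FileSystemLoader

jinja_env = Environment(loader=FileSystemLoader('pytbox/categraf/jinja2'))    # assumed template root
template = jinja_env.get_template('input.snmp/cisco_interface.toml.j2')

rendered = template.render(
    instances=[{'10.1.1.1:161': {'version': 2, 'community': 'public'}}],      # one v2 agent
    config={'timeout': '120s', 'path': '/usr/share/snmp/mibs'},
)
print(rendered)   # one [[instances]] block: agents = ["udp://10.1.1.1:161"], v2 community, interface tables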