pytbox 0.0.7__tar.gz → 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pytbox might be problematic. Click here for more details.
- pytbox-0.1.0/MANIFEST.in +15 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/PKG-INFO +2 -2
- {pytbox-0.0.7 → pytbox-0.1.0}/pyproject.toml +5 -2
- pytbox-0.1.0/src/pytbox/categraf/build_config.py +145 -0
- pytbox-0.1.0/src/pytbox/categraf/instances.toml +35 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/__init__.py +6 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.cpu/cpu.toml.j2 +5 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.disk/disk.toml.j2 +11 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.diskio/diskio.toml.j2 +6 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.dns_query/dns_query.toml.j2 +12 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.http_response/http_response.toml.j2 +9 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.mem/mem.toml.j2 +5 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.net/net.toml.j2 +11 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.net_response/net_response.toml.j2 +9 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.ping/ping.toml.j2 +11 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.prometheus/prometheus.toml.j2 +12 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.snmp/h3c_interface.toml.j2 +96 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.snmp/h3c_system.toml.j2 +41 -0
- pytbox-0.1.0/src/pytbox/categraf/jinja2/input.vsphere/vsphere.toml.j2 +211 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox.egg-info/PKG-INFO +2 -2
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox.egg-info/SOURCES.txt +16 -0
- pytbox-0.0.7/src/pytbox/categraf/build_config.py +0 -80
- {pytbox-0.0.7 → pytbox-0.1.0}/README.md +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/setup.cfg +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/alert/alert_handler.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/alert/ping.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/alicloud/sls.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/base.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/cli/__init__.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/cli/categraf/__init__.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/cli/categraf/commands.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/cli/common/__init__.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/cli/common/options.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/cli/common/utils.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/cli/formatters/__init__.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/cli/formatters/output.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/cli/main.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/cli.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/common/__init__.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/database/mongo.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/database/victoriametrics.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/dida365.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/feishu/client.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/feishu/endpoints.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/feishu/errors.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/feishu/helpers.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/feishu/typing.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/log/logger.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/log/victorialog.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/onepassword_connect.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/onepassword_sa.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/utils/env.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/utils/load_config.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/utils/ping_checker.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/utils/response.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/utils/richutils.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox/utils/timeutils.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox.egg-info/dependency_links.txt +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox.egg-info/entry_points.txt +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox.egg-info/requires.txt +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/src/pytbox.egg-info/top_level.txt +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/tests/test_base.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/tests/test_feishu.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/tests/test_logger.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/tests/test_onepassword_connect.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/tests/test_onepassword_sa.py +0 -0
- {pytbox-0.0.7 → pytbox-0.1.0}/tests/test_victoriametrics.py +0 -0
pytbox-0.1.0/MANIFEST.in
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
# 包含所有模板文件
|
|
2
|
+
recursive-include src/pytbox/categraf/jinja2 *.j2
|
|
3
|
+
recursive-include src/pytbox/categraf *.toml
|
|
4
|
+
|
|
5
|
+
# 包含文档文件
|
|
6
|
+
include README.md
|
|
7
|
+
|
|
8
|
+
# 包含配置文件
|
|
9
|
+
include pyproject.toml
|
|
10
|
+
|
|
11
|
+
# 排除缓存和临时文件
|
|
12
|
+
global-exclude *.pyc
|
|
13
|
+
global-exclude __pycache__
|
|
14
|
+
global-exclude .git*
|
|
15
|
+
global-exclude .DS_Store
|
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: pytbox
|
|
3
|
-
Version: 0.0.7
|
|
3
|
+
Version: 0.1.0
|
|
4
4
|
Summary: A collection of Python integrations and utilities (Feishu, Dida365, VictoriaMetrics, ...)
|
|
5
5
|
Author-email: mingming hou <houm01@foxmail.com>
|
|
6
|
-
License: MIT
|
|
6
|
+
License-Expression: MIT
|
|
7
7
|
Requires-Python: >=3.8
|
|
8
8
|
Description-Content-Type: text/markdown
|
|
9
9
|
Requires-Dist: requests>=2.0
|
|
@@ -4,10 +4,10 @@ build-backend = "setuptools.build_meta"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "pytbox"
|
|
7
|
-
version = "0.0.7"
|
|
7
|
+
version = "0.1.0"
|
|
8
8
|
description = "A collection of Python integrations and utilities (Feishu, Dida365, VictoriaMetrics, ...)"
|
|
9
9
|
authors = [{ name = "mingming hou", email = "houm01@foxmail.com" }]
|
|
10
|
-
license =
|
|
10
|
+
license = "MIT"
|
|
11
11
|
readme = "README.md"
|
|
12
12
|
requires-python = ">=3.8"
|
|
13
13
|
|
|
@@ -34,5 +34,8 @@ cli = [
|
|
|
34
34
|
"toml>=0.10.0", # TOML 输出支持
|
|
35
35
|
]
|
|
36
36
|
|
|
37
|
+
[tool.setuptools]
|
|
38
|
+
include-package-data = true
|
|
39
|
+
|
|
37
40
|
[tool.setuptools.packages.find]
|
|
38
41
|
where = ["src"]
|
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from pytbox.utils.load_config import load_config_by_file
|
|
6
|
+
from glob import glob
|
|
7
|
+
import os
|
|
8
|
+
from jinja2 import Environment, FileSystemLoader
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
jinja2_path = Path(__file__).parent / 'jinja2'
|
|
12
|
+
env = Environment(loader=FileSystemLoader(jinja2_path))
|
|
13
|
+
|
|
14
|
+
ping_template = env.get_template('input.ping/ping.toml.j2')
|
|
15
|
+
prometheus_template = env.get_template('input.prometheus/prometheus.toml.j2')
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class BuildConfig:
|
|
19
|
+
'''
|
|
20
|
+
生成配置
|
|
21
|
+
|
|
22
|
+
Args:
|
|
23
|
+
instances (_type_): _description_
|
|
24
|
+
output_dir (_type_): _description_
|
|
25
|
+
'''
|
|
26
|
+
def __init__(self, instances, output_dir):
|
|
27
|
+
self.instances = load_config_by_file(instances)
|
|
28
|
+
self.output_dir = output_dir
|
|
29
|
+
|
|
30
|
+
def _get_template(self, template_name):
|
|
31
|
+
return env.get_template(template_name)
|
|
32
|
+
|
|
33
|
+
def common(self, input_name):
|
|
34
|
+
template = self._get_template(f'input.{input_name}/{input_name}.toml.j2')
|
|
35
|
+
render_data = template.render()
|
|
36
|
+
target_dir = Path(self.output_dir) / f'input.{input_name}'
|
|
37
|
+
if not target_dir.exists():
|
|
38
|
+
target_dir.mkdir(parents=True, exist_ok=True)
|
|
39
|
+
|
|
40
|
+
with open(Path(self.output_dir) / f'input.{input_name}' / f'{input_name}.toml', 'w', encoding='utf-8') as f:
|
|
41
|
+
f.write(render_data)
|
|
42
|
+
|
|
43
|
+
def ping(self):
|
|
44
|
+
instances = self.instances['ping']['instance']
|
|
45
|
+
render_data = ping_template.render(instances=instances)
|
|
46
|
+
target_dir = Path(self.output_dir) / 'input.ping'
|
|
47
|
+
if not target_dir.exists():
|
|
48
|
+
target_dir.mkdir(parents=True, exist_ok=True)
|
|
49
|
+
|
|
50
|
+
with open(Path(self.output_dir) / 'input.ping' / 'ping.toml', 'w', encoding='utf-8') as f:
|
|
51
|
+
f.write(render_data)
|
|
52
|
+
|
|
53
|
+
def prometheus(self):
|
|
54
|
+
instances = self.instances['prometheus']['urls']
|
|
55
|
+
render_data = prometheus_template.render(instances=instances)
|
|
56
|
+
target_dir = Path(self.output_dir) / 'input.prometheus'
|
|
57
|
+
if not target_dir.exists():
|
|
58
|
+
target_dir.mkdir(parents=True, exist_ok=True)
|
|
59
|
+
with open(Path(self.output_dir) / 'input.prometheus' / 'prometheus.toml', 'w', encoding='utf-8') as f:
|
|
60
|
+
f.write(render_data)
|
|
61
|
+
|
|
62
|
+
def vsphere(self):
|
|
63
|
+
template = self._get_template('input.vsphere/vsphere.toml.j2')
|
|
64
|
+
instances = self.instances['vsphere']['instance']
|
|
65
|
+
render_data = template.render(instances=instances)
|
|
66
|
+
target_dir = Path(self.output_dir) / 'input.vsphere'
|
|
67
|
+
if not target_dir.exists():
|
|
68
|
+
target_dir.mkdir(parents=True, exist_ok=True)
|
|
69
|
+
with open(Path(self.output_dir) / 'input.vsphere' / 'vsphere.toml', 'w', encoding='utf-8') as f:
|
|
70
|
+
f.write(render_data)
|
|
71
|
+
|
|
72
|
+
def http_response(self):
|
|
73
|
+
template = self._get_template('input.http_response/http_response.toml.j2')
|
|
74
|
+
if self.instances.get('http_response'):
|
|
75
|
+
instances = self.instances['http_response']['instance']
|
|
76
|
+
render_data = template.render(instances=instances)
|
|
77
|
+
target_dir = Path(self.output_dir) / 'input.http_response'
|
|
78
|
+
if not target_dir.exists():
|
|
79
|
+
target_dir.mkdir(parents=True, exist_ok=True)
|
|
80
|
+
with open(Path(self.output_dir) / 'input.http_response' / 'http_response.toml', 'w', encoding='utf-8') as f:
|
|
81
|
+
f.write(render_data)
|
|
82
|
+
|
|
83
|
+
def net_response(self):
|
|
84
|
+
template = self._get_template('input.net_response/net_response.toml.j2')
|
|
85
|
+
if self.instances.get('net_response'):
|
|
86
|
+
instances = self.instances['net_response']['instance']
|
|
87
|
+
render_data = template.render(instances=instances)
|
|
88
|
+
target_dir = Path(self.output_dir) / 'input.net_response'
|
|
89
|
+
if not target_dir.exists():
|
|
90
|
+
target_dir.mkdir(parents=True, exist_ok=True)
|
|
91
|
+
with open(Path(self.output_dir) / 'input.net_response' / 'net_response.toml', 'w', encoding='utf-8') as f:
|
|
92
|
+
f.write(render_data)
|
|
93
|
+
|
|
94
|
+
def dns_query(self):
|
|
95
|
+
template = self._get_template('input.dns_query/dns_query.toml.j2')
|
|
96
|
+
if self.instances.get('dns_query'):
|
|
97
|
+
instances = self.instances['dns_query']['instance']
|
|
98
|
+
render_data = template.render(instances=instances)
|
|
99
|
+
target_dir = Path(self.output_dir) / 'input.dns_query'
|
|
100
|
+
if not target_dir.exists():
|
|
101
|
+
target_dir.mkdir(parents=True, exist_ok=True)
|
|
102
|
+
with open(Path(self.output_dir) / 'input.dns_query' / 'dns_query.toml', 'w', encoding='utf-8') as f:
|
|
103
|
+
f.write(render_data)
|
|
104
|
+
|
|
105
|
+
def snmp(self):
|
|
106
|
+
if self.instances.get('snmp'):
|
|
107
|
+
device_types = self.instances['snmp']['instances']
|
|
108
|
+
for device_type in device_types:
|
|
109
|
+
instances = self.instances['snmp']['instances'][device_type]
|
|
110
|
+
jinja2_dir = Path(jinja2_path) / 'input.snmp'
|
|
111
|
+
device_templates = glob(str(jinja2_dir / f'{device_type}_*.toml.j2'))
|
|
112
|
+
if not device_templates:
|
|
113
|
+
continue
|
|
114
|
+
|
|
115
|
+
for tmpl_path in device_templates:
|
|
116
|
+
tmpl_name = os.path.basename(tmpl_path)
|
|
117
|
+
# 例如 h3c_system.toml.j2 -> h3c_system
|
|
118
|
+
base_name = tmpl_name.replace('.toml.j2', '')
|
|
119
|
+
|
|
120
|
+
template = self._get_template(f'input.snmp/{tmpl_name}')
|
|
121
|
+
# 修复数据结构:模板期望的是数组,每个元素是字典
|
|
122
|
+
# instances 是 [{"udp://10.1.1.1:161": {...}, "udp://10.1.1.2:161": {...}}, ...]
|
|
123
|
+
render_data = template.render(instances=instances)
|
|
124
|
+
|
|
125
|
+
target_dir = Path(self.output_dir) / 'input.snmp'
|
|
126
|
+
if not target_dir.exists():
|
|
127
|
+
target_dir.mkdir(parents=True, exist_ok=True)
|
|
128
|
+
|
|
129
|
+
output_file = target_dir / f'{base_name}.toml'
|
|
130
|
+
with open(output_file, 'w', encoding='utf-8') as f:
|
|
131
|
+
f.write(render_data)
|
|
132
|
+
|
|
133
|
+
def run(self):
|
|
134
|
+
self.common('cpu')
|
|
135
|
+
self.common('mem')
|
|
136
|
+
self.common('net')
|
|
137
|
+
self.common('disk')
|
|
138
|
+
self.common('diskio')
|
|
139
|
+
self.vsphere()
|
|
140
|
+
self.ping()
|
|
141
|
+
self.prometheus()
|
|
142
|
+
self.http_response()
|
|
143
|
+
self.net_response()
|
|
144
|
+
self.dns_query()
|
|
145
|
+
self.snmp()
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
[ping]
|
|
2
|
+
[[ping.instance]]
|
|
3
|
+
"10.1.1.1" = { name = "x", env = "prod" }
|
|
4
|
+
"10.1.1.2" = { name = "demo02", env = "dev" }
|
|
5
|
+
|
|
6
|
+
[prometheus]
|
|
7
|
+
[[prometheus.urls]]
|
|
8
|
+
"http://10.200.12.202:9100" = { name = "x", env = "prod" }
|
|
9
|
+
|
|
10
|
+
[vsphere]
|
|
11
|
+
[[vsphere.instance]]
|
|
12
|
+
"test" = { vcenter = "https://10.1.1.1/sdk", username = "categraf@vsphere.local", password = "xxx" }
|
|
13
|
+
|
|
14
|
+
[http_response]
|
|
15
|
+
[[http_response.instance]]
|
|
16
|
+
"https://www.baidu.com" = { name = "x", env = "prod" }
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
[net_response]
|
|
20
|
+
[[net_response.instance]]
|
|
21
|
+
"124.74.245.90:8443" = { name = "x", env = "prod" }
|
|
22
|
+
|
|
23
|
+
[dns_query]
|
|
24
|
+
[[dns_query.instance]]
|
|
25
|
+
"119.29.29.29_baidu.com" = { dns_server = "119.29.29.29", domains = "www.baidu.com", labels = { name = "x", env = "prod" } }
|
|
26
|
+
|
|
27
|
+
[snmp]
|
|
28
|
+
[[snmp.instances.h3c]]
|
|
29
|
+
"10.1.1.1:161" = { version = 2, community = "public" }
|
|
30
|
+
"10.1.1.2:161" = { version = 2, community = "public" }
|
|
31
|
+
"10.1.1.3:161" = { version = 3, sec_name = "sec", auth_protocol = "SHA", auth_password = "pass01", priv_protocol = "AES", priv_password = "pass02", sec_level = "authPriv" }
|
|
32
|
+
|
|
33
|
+
# [[snmp.instances.huawei]]
|
|
34
|
+
# "udp://172.16.1.1:161" = { version = 2, community = "public"}
|
|
35
|
+
# "udp://172.16.1.2:161" = { version = 2, community = "public"}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# # collect interval
|
|
2
|
+
# interval = 15
|
|
3
|
+
|
|
4
|
+
# # By default stats will be gathered for all mount points.
|
|
5
|
+
# # Set mount_points will restrict the stats to only the specified mount points.
|
|
6
|
+
# mount_points = ["/"]
|
|
7
|
+
|
|
8
|
+
# Ignore mount points by filesystem type.
|
|
9
|
+
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs", "nsfs", "CDFS", "fuse.juicefs"]
|
|
10
|
+
|
|
11
|
+
ignore_mount_points = ["/boot", "/var/lib/kubelet/pods"]
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
{% for instance in instances %}
|
|
2
|
+
{% for name, data in instance.items() %}
|
|
3
|
+
[[instances]]
|
|
4
|
+
# append some labels for series
|
|
5
|
+
labels = { {% for k, v in data['labels'].items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
|
|
6
|
+
servers = ["{{ data['dns_server'] }}"]
|
|
7
|
+
## Domains or subdomains to query.
|
|
8
|
+
domains = ["{{ data['domains'] }}"]
|
|
9
|
+
record_type = "A"
|
|
10
|
+
timeout = 2
|
|
11
|
+
{% endfor %}
|
|
12
|
+
{% endfor %}
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
{% for instance in instances %}
|
|
2
|
+
{% for target, labels in instance.items() %}
|
|
3
|
+
[[instances]]
|
|
4
|
+
targets = [
|
|
5
|
+
"{{ target }}"
|
|
6
|
+
]
|
|
7
|
+
labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
|
|
8
|
+
{% endfor %}
|
|
9
|
+
{% endfor %}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# # collect interval
|
|
2
|
+
# interval = 15
|
|
3
|
+
|
|
4
|
+
# # whether collect protocol stats on Linux
|
|
5
|
+
# collect_protocol_stats = false
|
|
6
|
+
|
|
7
|
+
# # setting interfaces will tell categraf to gather these explicit interfaces
|
|
8
|
+
# interfaces = ["eth0"]
|
|
9
|
+
|
|
10
|
+
# enable_loopback_stats=true
|
|
11
|
+
# enable_link_down_stats=true
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
{% for instance in instances %}
|
|
2
|
+
{% for target, labels in instance.items() %}
|
|
3
|
+
[[instances]]
|
|
4
|
+
targets = [
|
|
5
|
+
"{{ target }}"
|
|
6
|
+
]
|
|
7
|
+
labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
|
|
8
|
+
{% endfor %}
|
|
9
|
+
{% endfor %}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
{% for instance in instances %}
|
|
2
|
+
{% for target, labels in instance.items() %}
|
|
3
|
+
[[instances]]
|
|
4
|
+
targets = [
|
|
5
|
+
"{{ target }}"
|
|
6
|
+
]
|
|
7
|
+
method = "exec"
|
|
8
|
+
count = 3
|
|
9
|
+
labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
|
|
10
|
+
{% endfor %}
|
|
11
|
+
{% endfor %}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
{% for instance in instances %}
|
|
2
|
+
{% for instance, labels in instance.items() %}
|
|
3
|
+
[[instances]]
|
|
4
|
+
urls = [
|
|
5
|
+
"{{ instance }}"
|
|
6
|
+
]
|
|
7
|
+
|
|
8
|
+
url_label_key = "instance"
|
|
9
|
+
url_label_value = "{{ '{{.Host}}' }}"
|
|
10
|
+
labels = { {% for k, v in labels.items() %}{% if not loop.first %}, {% endif %}{% if k.isidentifier() %}{{ k }}{% else %}"{{ k }}"{% endif %} = "{{ v }}"{% endfor %} }
|
|
11
|
+
{% endfor %}
|
|
12
|
+
{% endfor %}
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
{% for instance in instances %}
|
|
2
|
+
{% for agent, detail in instance.items() %}
|
|
3
|
+
[[instances]]
|
|
4
|
+
agents = [
|
|
5
|
+
"udp://{{agent}}"
|
|
6
|
+
]
|
|
7
|
+
{% if detail.version == 2 %}
|
|
8
|
+
version = {{detail.version}}
|
|
9
|
+
community = "{{detail.community}}"
|
|
10
|
+
{% endif %}
|
|
11
|
+
{% if detail.version == 3 %}
|
|
12
|
+
version = {{detail.version}}
|
|
13
|
+
sec_name = "{{detail.sec_name}}"
|
|
14
|
+
auth_protocol = "{{detail.auth_protocol}}"
|
|
15
|
+
auth_password = "{{detail.auth_password}}"
|
|
16
|
+
priv_protocol = "{{detail.priv_protocol}}"
|
|
17
|
+
priv_password = "{{detail.priv_password}}"
|
|
18
|
+
sec_level = "{{detail.sec_level}}"
|
|
19
|
+
{% endif %}
|
|
20
|
+
timeout = "5s"
|
|
21
|
+
retries = 3
|
|
22
|
+
path = ["/usr/share/snmp/mibs"]
|
|
23
|
+
translator = "gosmi"
|
|
24
|
+
max_repetitions = 50
|
|
25
|
+
|
|
26
|
+
[[instances.field]]
|
|
27
|
+
name = "sysName"
|
|
28
|
+
oid = "1.3.6.1.2.1.1.5.0"
|
|
29
|
+
is_tag = true
|
|
30
|
+
|
|
31
|
+
[[instances.table]]
|
|
32
|
+
name = "interface"
|
|
33
|
+
inherit_tags = ["sysName"]
|
|
34
|
+
index_as_tag = true
|
|
35
|
+
|
|
36
|
+
[[instances.table.field]]
|
|
37
|
+
name = "ifIndex"
|
|
38
|
+
oid = "1.3.6.1.2.1.2.2.1.1"
|
|
39
|
+
is_tag = true
|
|
40
|
+
|
|
41
|
+
[[instances.table.field]]
|
|
42
|
+
name = "ifName"
|
|
43
|
+
oid = "1.3.6.1.2.1.31.1.1.1.1"
|
|
44
|
+
is_tag = true
|
|
45
|
+
|
|
46
|
+
[[instances.table.field]]
|
|
47
|
+
name = "ifDescr"
|
|
48
|
+
oid = "1.3.6.1.2.1.2.2.1.2"
|
|
49
|
+
is_tag = true
|
|
50
|
+
|
|
51
|
+
[[instances.table.field]]
|
|
52
|
+
name = "ifSpeed"
|
|
53
|
+
oid = "1.3.6.1.2.1.2.2.1.5"
|
|
54
|
+
# is_tag = true
|
|
55
|
+
|
|
56
|
+
[[instances.table.field]]
|
|
57
|
+
name = "ifAdminStatus"
|
|
58
|
+
oid = "1.3.6.1.2.1.2.2.1.7"
|
|
59
|
+
# is_tag = true
|
|
60
|
+
|
|
61
|
+
[[instances.table.field]]
|
|
62
|
+
name = "ifOperStatus"
|
|
63
|
+
oid = "1.3.6.1.2.1.2.2.1.8"
|
|
64
|
+
# is_tag = true
|
|
65
|
+
|
|
66
|
+
[[instances.table.field]]
|
|
67
|
+
name = "ifInDiscards"
|
|
68
|
+
oid = "1.3.6.1.2.1.2.2.1.13"
|
|
69
|
+
|
|
70
|
+
[[instances.table.field]]
|
|
71
|
+
name = "ifInErrors"
|
|
72
|
+
oid = "1.3.6.1.2.1.2.2.1.14"
|
|
73
|
+
|
|
74
|
+
[[instances.table.field]]
|
|
75
|
+
name = "ifOutDiscards"
|
|
76
|
+
oid = "1.3.6.1.2.1.2.2.1.19"
|
|
77
|
+
|
|
78
|
+
[[instances.table.field]]
|
|
79
|
+
name = "ifOutErrors"
|
|
80
|
+
oid = "1.3.6.1.2.1.2.2.1.20"
|
|
81
|
+
|
|
82
|
+
[[instances.table.field]]
|
|
83
|
+
name = "ifAlias"
|
|
84
|
+
oid = "1.3.6.1.2.1.31.1.1.1.18"
|
|
85
|
+
is_tag = true
|
|
86
|
+
|
|
87
|
+
[[instances.table.field]]
|
|
88
|
+
name = "ifHCInOctets"
|
|
89
|
+
oid = "1.3.6.1.2.1.31.1.1.1.6"
|
|
90
|
+
|
|
91
|
+
[[instances.table.field]]
|
|
92
|
+
name = "ifHCOutOctets"
|
|
93
|
+
oid = "1.3.6.1.2.1.31.1.1.1.10"
|
|
94
|
+
|
|
95
|
+
{% endfor %}
|
|
96
|
+
{% endfor %}
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
{% for instance in instances %}
|
|
2
|
+
{% for agent, detail in instance.items() %}
|
|
3
|
+
[[instances]]
|
|
4
|
+
agents = [
|
|
5
|
+
"udp://{{agent}}"
|
|
6
|
+
]
|
|
7
|
+
{% if detail.version == 2 %}
|
|
8
|
+
version = {{detail.version}}
|
|
9
|
+
community = "{{detail.community}}"
|
|
10
|
+
{% endif %}
|
|
11
|
+
{% if detail.version == 3 %}
|
|
12
|
+
version = {{detail.version}}
|
|
13
|
+
sec_name = "{{detail.sec_name}}"
|
|
14
|
+
auth_protocol = "{{detail.auth_protocol}}"
|
|
15
|
+
auth_password = "{{detail.auth_password}}"
|
|
16
|
+
priv_protocol = "{{detail.priv_protocol}}"
|
|
17
|
+
priv_password = "{{detail.priv_password}}"
|
|
18
|
+
sec_level = "{{detail.sec_level}}"
|
|
19
|
+
{% endif %}
|
|
20
|
+
timeout = "5s"
|
|
21
|
+
retries = 3
|
|
22
|
+
path = ["/usr/share/snmp/mibs"]
|
|
23
|
+
translator = "gosmi"
|
|
24
|
+
max_repetitions = 50
|
|
25
|
+
|
|
26
|
+
[[instances.field]]
|
|
27
|
+
name = "sysName"
|
|
28
|
+
oid = "1.3.6.1.2.1.1.5.0"
|
|
29
|
+
is_tag = true
|
|
30
|
+
|
|
31
|
+
[[instances.field]]
|
|
32
|
+
oid = "1.3.6.1.2.1.1.1.0"
|
|
33
|
+
name = "sysDescr"
|
|
34
|
+
is_tag = true
|
|
35
|
+
|
|
36
|
+
[[instances.field]]
|
|
37
|
+
oid = "1.3.6.1.2.1.1.3.0"
|
|
38
|
+
name = "sysUpTime"
|
|
39
|
+
conversion = "float(2)"
|
|
40
|
+
{% endfor %}
|
|
41
|
+
{% endfor %}
|
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
# # collect interval
|
|
2
|
+
# interval = 15
|
|
3
|
+
|
|
4
|
+
# Read metrics from one or many vCenters
|
|
5
|
+
{% for instance in instances %}
|
|
6
|
+
{% for name, vcenter_info in instance.items() %}
|
|
7
|
+
[[instances]]
|
|
8
|
+
vcenter = "{{ vcenter_info['vcenter'] }}"
|
|
9
|
+
username = "{{ vcenter_info['username'] }}"
|
|
10
|
+
password = "{{ vcenter_info['password'] }}"
|
|
11
|
+
|
|
12
|
+
cluster_metric_exlcude = ["*"]
|
|
13
|
+
## VMs
|
|
14
|
+
## Typical VM metrics (if omitted or empty, all metrics are collected)
|
|
15
|
+
# vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
|
|
16
|
+
# vm_exclude = [] # Inventory paths to exclude
|
|
17
|
+
vm_metric_include = [
|
|
18
|
+
# "config.hardware.numCPU",
|
|
19
|
+
# "config.hardware.memoryMB",
|
|
20
|
+
"cpu.demand.average",
|
|
21
|
+
"cpu.idle.summation",
|
|
22
|
+
"cpu.latency.average",
|
|
23
|
+
"cpu.readiness.average",
|
|
24
|
+
"cpu.ready.summation",
|
|
25
|
+
"cpu.run.summation",
|
|
26
|
+
"cpu.usagemhz.average",
|
|
27
|
+
"cpu.usage.average",
|
|
28
|
+
"cpu.used.summation",
|
|
29
|
+
"cpu.wait.summation",
|
|
30
|
+
"mem.active.average",
|
|
31
|
+
"mem.granted.average",
|
|
32
|
+
"mem.latency.average",
|
|
33
|
+
"mem.swapin.average",
|
|
34
|
+
"mem.swapinRate.average",
|
|
35
|
+
"mem.swapout.average",
|
|
36
|
+
"mem.swapoutRate.average",
|
|
37
|
+
"mem.usage.average",
|
|
38
|
+
"mem.vmmemctl.average",
|
|
39
|
+
"net.bytesRx.average",
|
|
40
|
+
"net.bytesTx.average",
|
|
41
|
+
"net.droppedRx.summation",
|
|
42
|
+
"net.droppedTx.summation",
|
|
43
|
+
"net.usage.average",
|
|
44
|
+
"power.power.average",
|
|
45
|
+
"virtualDisk.numberReadAveraged.average",
|
|
46
|
+
"virtualDisk.numberWriteAveraged.average",
|
|
47
|
+
"virtualDisk.read.average",
|
|
48
|
+
"virtualDisk.readOIO.latest",
|
|
49
|
+
"virtualDisk.throughput.usage.average",
|
|
50
|
+
"virtualDisk.totalReadLatency.average",
|
|
51
|
+
"virtualDisk.totalWriteLatency.average",
|
|
52
|
+
"virtualDisk.write.average",
|
|
53
|
+
"virtualDisk.writeOIO.latest",
|
|
54
|
+
"sys.uptime.latest",
|
|
55
|
+
]
|
|
56
|
+
# vm_metric_exclude = [] ## Nothing is excluded by default
|
|
57
|
+
# vm_instances = true ## true by default
|
|
58
|
+
|
|
59
|
+
## Hosts
|
|
60
|
+
## Typical host metrics (if omitted or empty, all metrics are collected)
|
|
61
|
+
# host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
|
|
62
|
+
# host_exclude [] # Inventory paths to exclude
|
|
63
|
+
host_metric_include = [
|
|
64
|
+
"cpu.coreUtilization.average",
|
|
65
|
+
"cpu.costop.summation",
|
|
66
|
+
"cpu.demand.average",
|
|
67
|
+
"cpu.idle.summation",
|
|
68
|
+
"cpu.latency.average",
|
|
69
|
+
"cpu.readiness.average",
|
|
70
|
+
"cpu.ready.summation",
|
|
71
|
+
"cpu.swapwait.summation",
|
|
72
|
+
"cpu.usage.average",
|
|
73
|
+
"cpu.usagemhz.average",
|
|
74
|
+
"cpu.used.summation",
|
|
75
|
+
"cpu.utilization.average",
|
|
76
|
+
"cpu.wait.summation",
|
|
77
|
+
"disk.deviceReadLatency.average",
|
|
78
|
+
"disk.deviceWriteLatency.average",
|
|
79
|
+
"disk.kernelReadLatency.average",
|
|
80
|
+
"disk.kernelWriteLatency.average",
|
|
81
|
+
"disk.numberReadAveraged.average",
|
|
82
|
+
"disk.numberWriteAveraged.average",
|
|
83
|
+
"disk.read.average",
|
|
84
|
+
"disk.totalReadLatency.average",
|
|
85
|
+
"disk.totalWriteLatency.average",
|
|
86
|
+
"disk.write.average",
|
|
87
|
+
"mem.active.average",
|
|
88
|
+
"mem.latency.average",
|
|
89
|
+
"mem.state.latest",
|
|
90
|
+
"mem.swapin.average",
|
|
91
|
+
"mem.swapinRate.average",
|
|
92
|
+
"mem.swapout.average",
|
|
93
|
+
"mem.swapoutRate.average",
|
|
94
|
+
"mem.totalCapacity.average",
|
|
95
|
+
"mem.usage.average",
|
|
96
|
+
"mem.vmmemctl.average",
|
|
97
|
+
"net.bytesRx.average",
|
|
98
|
+
"net.bytesTx.average",
|
|
99
|
+
"net.droppedRx.summation",
|
|
100
|
+
"net.droppedTx.summation",
|
|
101
|
+
"net.errorsRx.summation",
|
|
102
|
+
"net.errorsTx.summation",
|
|
103
|
+
"net.usage.average",
|
|
104
|
+
"power.power.average",
|
|
105
|
+
"storageAdapter.numberReadAveraged.average",
|
|
106
|
+
"storageAdapter.numberWriteAveraged.average",
|
|
107
|
+
"storageAdapter.read.average",
|
|
108
|
+
"storageAdapter.write.average",
|
|
109
|
+
"sys.uptime.latest",
|
|
110
|
+
]
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
# host_instances = true ## true by default
|
|
114
|
+
# host_include = [] ## Nothing included by default
|
|
115
|
+
# host_exclude = [] ## Nothing excluded by default
|
|
116
|
+
# host_metric_include = [] ## Nothing included by default
|
|
117
|
+
# host_metric_exclude = [] ## Nothing excluded by default
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
## Clusters
|
|
121
|
+
# cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
|
|
122
|
+
# cluster_exclude = [] # Inventory paths to exclude
|
|
123
|
+
# cluster_metric_include = [] ## if omitted or empty, all metrics are collected
|
|
124
|
+
# cluster_metric_exclude = [] ## Nothing excluded by default
|
|
125
|
+
# cluster_instances = false ## false by default
|
|
126
|
+
|
|
127
|
+
## Resource Pools
|
|
128
|
+
# resoucepool_include = [ "/*/host/**"] # Inventory path to datastores to collect (by default all are collected)
|
|
129
|
+
# resoucepool_exclude = [] # Inventory paths to exclude
|
|
130
|
+
# resoucepool_metric_include = [] ## if omitted or empty, all metrics are collected
|
|
131
|
+
# resoucepool_metric_exclude = [] ## Nothing excluded by default
|
|
132
|
+
# resoucepool_instances = false ## false by default
|
|
133
|
+
|
|
134
|
+
## Datastores
|
|
135
|
+
# datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
|
|
136
|
+
# datastore_exclude = [] # Inventory paths to exclude
|
|
137
|
+
# datastore_metric_include = [] ## if omitted or empty, all metrics are collected
|
|
138
|
+
# datastore_metric_exclude = [] ## Nothing excluded by default
|
|
139
|
+
# datastore_instances = false ## false by default
|
|
140
|
+
|
|
141
|
+
## Datacenters
|
|
142
|
+
# datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
|
|
143
|
+
# datacenter_exclude = [] # Inventory paths to exclude
|
|
144
|
+
# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
|
|
145
|
+
# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
|
|
146
|
+
# datacenter_instances = false ## false by default
|
|
147
|
+
|
|
148
|
+
## Plugin Settings
|
|
149
|
+
## separator character to use for measurement and field names (default: "_")
|
|
150
|
+
# separator = "_"
|
|
151
|
+
|
|
152
|
+
## Collect IP addresses? Valid values are "ipv4" and "ipv6"
|
|
153
|
+
# ip_addresses = ["ipv6", "ipv4" ]
|
|
154
|
+
|
|
155
|
+
## When set to true, all samples are sent as integers. This makes the output
|
|
156
|
+
## data types backwards compatible with Telegraf 1.9 or lower. Normally all
|
|
157
|
+
## samples from vCenter, with the exception of percentages, are integer
|
|
158
|
+
## values, but under some conditions, some averaging takes place internally in
|
|
159
|
+
## the plugin. Setting this flag to "false" will send values as floats to
|
|
160
|
+
## preserve the full precision when averaging takes place.
|
|
161
|
+
# use_int_samples = true
|
|
162
|
+
|
|
163
|
+
## Custom attributes from vCenter can be very useful for queries in order to slice the
|
|
164
|
+
## metrics along different dimension and for forming ad-hoc relationships. They are disabled
|
|
165
|
+
## by default, since they can add a considerable amount of tags to the resulting metrics. To
|
|
166
|
+
## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
|
|
167
|
+
## to select the attributes you want to include.
|
|
168
|
+
## By default, since they can add a considerable amount of tags to the resulting metrics. To
|
|
169
|
+
## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
|
|
170
|
+
## to select the attributes you want to include.
|
|
171
|
+
# custom_attribute_include = []
|
|
172
|
+
# custom_attribute_exclude = ["*"]
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In
|
|
176
|
+
## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported
|
|
177
|
+
## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing
|
|
178
|
+
## it too much may cause performance issues.
|
|
179
|
+
# metric_lookback = 3
|
|
180
|
+
|
|
181
|
+
## number of objects to retrieve per query for realtime resources (vms and hosts)
|
|
182
|
+
## set to 64 for vCenter 5.5 and 6.0 (default: 256)
|
|
183
|
+
# max_query_objects = 256
|
|
184
|
+
|
|
185
|
+
## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
|
|
186
|
+
## set to 64 for vCenter 5.5 and 6.0 (default: 256)
|
|
187
|
+
# max_query_metrics = 256
|
|
188
|
+
|
|
189
|
+
## number of go routines to use for collection and discovery of objects and metrics
|
|
190
|
+
# collect_concurrency = 1
|
|
191
|
+
# discover_concurrency = 1
|
|
192
|
+
|
|
193
|
+
## the interval before (re)discovering objects subject to metrics collection (default: 300s)
|
|
194
|
+
# object_discovery_interval = "300s"
|
|
195
|
+
|
|
196
|
+
## timeout applies to any of the api request made to vcenter
|
|
197
|
+
# timeout = "60s"
|
|
198
|
+
|
|
199
|
+
## Optional SSL Config
|
|
200
|
+
use_tls = true
|
|
201
|
+
# tls_ca = "/path/to/cafile"
|
|
202
|
+
# tls_cert = "/path/to/certfile"
|
|
203
|
+
# tls_key = "/path/to/keyfile"
|
|
204
|
+
## Use SSL but skip chain & host verification
|
|
205
|
+
insecure_skip_verify = true
|
|
206
|
+
|
|
207
|
+
## The Historical Interval value must match EXACTLY the interval in the daily
|
|
208
|
+
# "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals
|
|
209
|
+
# historical_interval = "5m"
|
|
210
|
+
{% endfor %}
|
|
211
|
+
{% endfor %}
|
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: pytbox
|
|
3
|
-
Version: 0.0
|
|
3
|
+
Version: 0.1.0
|
|
4
4
|
Summary: A collection of Python integrations and utilities (Feishu, Dida365, VictoriaMetrics, ...)
|
|
5
5
|
Author-email: mingming hou <houm01@foxmail.com>
|
|
6
|
-
License: MIT
|
|
6
|
+
License-Expression: MIT
|
|
7
7
|
Requires-Python: >=3.8
|
|
8
8
|
Description-Content-Type: text/markdown
|
|
9
9
|
Requires-Dist: requests>=2.0
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
MANIFEST.in
|
|
1
2
|
README.md
|
|
2
3
|
pyproject.toml
|
|
3
4
|
src/pytbox/base.py
|
|
@@ -15,6 +16,21 @@ src/pytbox/alert/alert_handler.py
|
|
|
15
16
|
src/pytbox/alert/ping.py
|
|
16
17
|
src/pytbox/alicloud/sls.py
|
|
17
18
|
src/pytbox/categraf/build_config.py
|
|
19
|
+
src/pytbox/categraf/instances.toml
|
|
20
|
+
src/pytbox/categraf/jinja2/__init__.py
|
|
21
|
+
src/pytbox/categraf/jinja2/input.cpu/cpu.toml.j2
|
|
22
|
+
src/pytbox/categraf/jinja2/input.disk/disk.toml.j2
|
|
23
|
+
src/pytbox/categraf/jinja2/input.diskio/diskio.toml.j2
|
|
24
|
+
src/pytbox/categraf/jinja2/input.dns_query/dns_query.toml.j2
|
|
25
|
+
src/pytbox/categraf/jinja2/input.http_response/http_response.toml.j2
|
|
26
|
+
src/pytbox/categraf/jinja2/input.mem/mem.toml.j2
|
|
27
|
+
src/pytbox/categraf/jinja2/input.net/net.toml.j2
|
|
28
|
+
src/pytbox/categraf/jinja2/input.net_response/net_response.toml.j2
|
|
29
|
+
src/pytbox/categraf/jinja2/input.ping/ping.toml.j2
|
|
30
|
+
src/pytbox/categraf/jinja2/input.prometheus/prometheus.toml.j2
|
|
31
|
+
src/pytbox/categraf/jinja2/input.snmp/h3c_interface.toml.j2
|
|
32
|
+
src/pytbox/categraf/jinja2/input.snmp/h3c_system.toml.j2
|
|
33
|
+
src/pytbox/categraf/jinja2/input.vsphere/vsphere.toml.j2
|
|
18
34
|
src/pytbox/cli/__init__.py
|
|
19
35
|
src/pytbox/cli/main.py
|
|
20
36
|
src/pytbox/cli/categraf/__init__.py
|
|
@@ -1,80 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
|
|
3
|
-
import os
|
|
4
|
-
from pathlib import Path
|
|
5
|
-
from pytbox.utils.load_config import load_config_by_file
|
|
6
|
-
|
|
7
|
-
from jinja2 import Environment, FileSystemLoader
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
jinja2_path = Path(__file__).parent / 'jinja2'
|
|
11
|
-
env = Environment(loader=FileSystemLoader(jinja2_path))
|
|
12
|
-
|
|
13
|
-
ping_template = env.get_template('input.ping/ping.toml.j2')
|
|
14
|
-
prometheus_template = env.get_template('input.prometheus/prometheus.toml.j2')
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
class BuildConfig:
|
|
18
|
-
'''
|
|
19
|
-
生成配置
|
|
20
|
-
|
|
21
|
-
Args:
|
|
22
|
-
instances (_type_): _description_
|
|
23
|
-
output_dir (_type_): _description_
|
|
24
|
-
'''
|
|
25
|
-
def __init__(self, instances, output_dir):
|
|
26
|
-
self.instances = load_config_by_file(instances)
|
|
27
|
-
self.output_dir = output_dir
|
|
28
|
-
|
|
29
|
-
def _get_template(self, template_name):
|
|
30
|
-
return env.get_template(template_name)
|
|
31
|
-
|
|
32
|
-
def common(self, input_name):
|
|
33
|
-
template = self._get_template(f'input.{input_name}/{input_name}.toml.j2')
|
|
34
|
-
render_data = template.render()
|
|
35
|
-
target_dir = Path(self.output_dir) / f'input.{input_name}'
|
|
36
|
-
if not target_dir.exists():
|
|
37
|
-
target_dir.mkdir(parents=True, exist_ok=True)
|
|
38
|
-
|
|
39
|
-
with open(Path(self.output_dir) / f'input.{input_name}' / f'{input_name}.toml', 'w', encoding='utf-8') as f:
|
|
40
|
-
f.write(render_data)
|
|
41
|
-
|
|
42
|
-
def ping(self):
|
|
43
|
-
instances = self.instances['ping']['instance']
|
|
44
|
-
render_data = ping_template.render(instances=instances)
|
|
45
|
-
target_dir = Path(self.output_dir) / 'input.ping'
|
|
46
|
-
if not target_dir.exists():
|
|
47
|
-
target_dir.mkdir(parents=True, exist_ok=True)
|
|
48
|
-
|
|
49
|
-
with open(Path(self.output_dir) / 'input.ping' / 'ping.toml', 'w', encoding='utf-8') as f:
|
|
50
|
-
f.write(render_data)
|
|
51
|
-
|
|
52
|
-
def prometheus(self):
|
|
53
|
-
instances = self.instances['prometheus']['urls']
|
|
54
|
-
render_data = prometheus_template.render(instances=instances)
|
|
55
|
-
target_dir = Path(self.output_dir) / 'input.prometheus'
|
|
56
|
-
if not target_dir.exists():
|
|
57
|
-
target_dir.mkdir(parents=True, exist_ok=True)
|
|
58
|
-
with open(Path(self.output_dir) / 'input.prometheus' / 'prometheus.toml', 'w', encoding='utf-8') as f:
|
|
59
|
-
f.write(render_data)
|
|
60
|
-
|
|
61
|
-
def vsphere(self):
|
|
62
|
-
template = self._get_template('input.vsphere/vsphere.toml.j2')
|
|
63
|
-
instances = self.instances['vsphere']['instance']
|
|
64
|
-
print(instances)
|
|
65
|
-
render_data = template.render(instances=instances)
|
|
66
|
-
target_dir = Path(self.output_dir) / 'input.vsphere'
|
|
67
|
-
if not target_dir.exists():
|
|
68
|
-
target_dir.mkdir(parents=True, exist_ok=True)
|
|
69
|
-
with open(Path(self.output_dir) / 'input.vsphere' / 'vsphere.toml', 'w', encoding='utf-8') as f:
|
|
70
|
-
f.write(render_data)
|
|
71
|
-
|
|
72
|
-
def run(self):
|
|
73
|
-
# self.common('cpu')
|
|
74
|
-
# self.common('mem')
|
|
75
|
-
# self.common('net')
|
|
76
|
-
# self.common('disk')
|
|
77
|
-
# self.common('diskio')
|
|
78
|
-
self.vsphere()
|
|
79
|
-
# self.ping()
|
|
80
|
-
# self.prometheus()
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|