flexmetric 0.3.3__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flexmetric/metric_process/database_processing.py +16 -14
- flexmetric/metric_process/process_commands.py +23 -7
- flexmetric/metric_process/prometheus_agent.py +24 -13
- flexmetric/metric_process/views.py +46 -8
- {flexmetric-0.3.3.dist-info → flexmetric-0.4.1.dist-info}/METADATA +28 -23
- {flexmetric-0.3.3.dist-info → flexmetric-0.4.1.dist-info}/RECORD +9 -9
- {flexmetric-0.3.3.dist-info → flexmetric-0.4.1.dist-info}/WHEEL +0 -0
- {flexmetric-0.3.3.dist-info → flexmetric-0.4.1.dist-info}/entry_points.txt +0 -0
- {flexmetric-0.3.3.dist-info → flexmetric-0.4.1.dist-info}/top_level.txt +0 -0
flexmetric/metric_process/database_processing.py

@@ -60,8 +60,10 @@ def process_database_queries(queries_file, databases_file):
 
             db_path = db_conf["db_connection"]
             query = cmd["query"]
- …
- …
+            labels = cmd.get('labels', [])
+            label_values = cmd.get('label_values', [])
+            main_label = cmd.get('main_label', 'default_db_metric')
+
             # check if query is safe
             if is_safe_query(query):
                 value = execute_sqlite_query(db_path, query)
@@ -69,20 +71,20 @@ def process_database_queries(queries_file, databases_file):
                 print(f"[WARN] Unsupported query type: {query}")
                 return None
 
-            if …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
+            if not isinstance(label_values, list):
+                label_values = [label_values]
+
+            result = {
+                'result': [{
+                    'label': label_values,
+                    'value': value
+                }],
+                'labels': labels,
+                'main_label': main_label
+            }
+            all_results.append(result)
         except Exception as e:
             print(
                 f"[ERROR] Processing command '{cmd.get('name', 'unknown')}' failed: {e}"
             )
- …
     return all_results
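For reference, a minimal sketch (not taken from the package) of the result structure that the updated `process_database_queries` builds per query entry, assuming `labels`, `label_values`, and `main_label` come from the queries YAML; the sample values are invented:

```python
# Illustrative only: mirrors the dict shape assembled in the diff above.
value = 42  # stand-in for whatever execute_sqlite_query() returned
labels = ["database_name", "table_name"]
label_values = ["userdb", "users"]
main_label = "database_user_count"

if not isinstance(label_values, list):
    label_values = [label_values]

result = {
    "result": [{"label": label_values, "value": value}],
    "labels": labels,
    "main_label": main_label,
}
print(result)
```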
flexmetric/metric_process/process_commands.py

@@ -55,24 +55,40 @@ def parse_command_output(raw_output, label_column, value_column, fixed_label_valu
 
 def process_single_command(cmd_info):
     command = cmd_info['command']
-    label_name = cmd_info['label']
     timeout = cmd_info.get('timeout_seconds', 30)
- …
+    labels = cmd_info.get('labels', [])
+    label_columns = cmd_info.get('label_columns', [])
     value_column = cmd_info.get('value_column', 0)
- …
+    main_label = cmd_info.get('main_label', 'default_metric')
 
     raw_output = execute_command_with_timeout(command, timeout)
-    if raw_output …
+    if not raw_output:
         logger.warning(f"No results for command {command}")
         return None
 
- …
- …
+    lines = raw_output.strip().split('\n')
+    if not lines:
+        logger.error(f"No valid lines returned from command: {command}")
+        return None
+    result_list = []
+    for line in lines:
+        parts = line.split()
+        try:
+            label_columns_value = []
+            for value in label_columns:
+                label_columns_value.append(parts[value])
+            result_list.append({ "label": label_columns_value, "value": parts[value_column] })
+        except Exception as e:
+            logger.error(f"Error parsing line: '{line}' → {e}")
+            continue
     return {
         'result': result_list,
-        'labels': …
+        'labels': labels,
+        'main_label': main_label
     }
 
+
+
 def is_command_safe(command):
     blacklist = ['rm', 'reboot', 'shutdown', 'halt', 'poweroff', 'mkfs', 'dd']
     for dangerous_cmd in blacklist:
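A small sketch of the new column-based parsing, using one invented `df -h` output line and the `label_columns`/`value_column` convention from commands.yaml (the line content and variable names here are illustrative, not the package's code):

```python
# Illustrative only: one output line split into label values and a metric value.
line = "/dev/sda1  50G  20G  28G  42%  /"
parts = line.split()

label_columns = [0, -1]  # filesystem name and mount point
value_column = 4         # the "Use%" column

label_values = [parts[idx] for idx in label_columns]
entry = {"label": label_values, "value": parts[value_column]}
print(entry)  # {'label': ['/dev/sda1', '/'], 'value': '42%'}
```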
flexmetric/metric_process/prometheus_agent.py

@@ -190,27 +190,37 @@ def measure(args):
     if cmd_results != None:
         exec_result.extend(cmd_results)
     global gauges
+
     for data in exec_result:
         results = data["result"]
         labels = data["labels"]
- …
- …
+        main_label_value = data.get("main_label", "default_main")
+        gauge_name = main_label_value.lower() + "_gauge"
+
         if gauge_name not in gauges:
             gauge = Gauge(gauge_name, f"{gauge_name} for different metrics", labels)
             gauges[gauge_name] = gauge
         else:
             gauge = gauges[gauge_name]
+
         for result in results:
- …
- …
- …
- …
- …
- …
- …
- …
- …
+            label_values = result["label"]
+
+            if not isinstance(label_values, list):
+                # Automatically wrap single label into list for consistency
+                label_values = [label_values]
+
+            if len(label_values) != len(labels):
+                logger.error(f"Label mismatch: expected {len(labels)} values but got {len(label_values)}")
+                continue
+
+            label_dict = dict(zip(labels, label_values))
+            # print(label_dict)
+
+            try:
                 gauge.labels(**label_dict).set(convert_to_data_type(result["value"]))
+            except Exception as ex:
+                logger.error(f"Failed to set gauge for labels {label_dict}: {ex}")
 
 
 def scheduled_measure(args):
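A sketch of the per-`main_label` gauge pattern used above, written against `prometheus_client`; the `gauges` cache and the sample payload are illustrative rather than copied from the package:

```python
# Illustrative only: one Gauge per main_label, labelled by zipping label names with values.
from prometheus_client import Gauge

gauges = {}
data = {
    "result": [{"label": ["/dev/sda1", "/"], "value": 42.0}],
    "labels": ["filesystem", "mounted"],
    "main_label": "disk_usage_filesystem_mount_point",
}

gauge_name = data["main_label"].lower() + "_gauge"
if gauge_name not in gauges:
    gauges[gauge_name] = Gauge(gauge_name, f"{gauge_name} for different metrics", data["labels"])
gauge = gauges[gauge_name]

for result in data["result"]:
    label_dict = dict(zip(data["labels"], result["label"]))
    gauge.labels(**label_dict).set(float(result["value"]))
```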
@@ -244,5 +254,6 @@ def main():
         secure_flask_run(args)
     else:
         run_flask(args.host, args.port)
- …
- …
+# # args = arguments()
+# # measure(args)
+# main()
flexmetric/metric_process/views.py

@@ -15,21 +15,59 @@ def add_update_metric_route():
     @app.route("/update_metric", methods=["POST"])
     def update_metric():
         try:
-            data = request.get_json()
+            data = request.get_json(force=True)
 
- …
- …
+            # Top-level validation
+            if not isinstance(data, dict):
+                return jsonify({"status": "invalid structure", "error": "Top-level JSON must be an object"}), 400
 
- …
+            required_keys = {"result", "labels", "main_label"}
+            if not required_keys.issubset(data):
+                return jsonify({"status": "invalid structure", "error": f"Missing keys: {required_keys - set(data)}"}), 400
+
+            result = data.get("result")
+            labels = data.get("labels")
+            main_label = data.get("main_label")
+
+            # Type validation
+            if not isinstance(result, list) or not all(isinstance(item, dict) for item in result):
+                return jsonify({"status": "invalid result", "error": "Result must be a list of dictionaries"}), 400
+
+            if not isinstance(labels, list) or not all(isinstance(label, str) for label in labels):
+                return jsonify({"status": "invalid labels", "error": "Labels must be a list of strings"}), 400
+
+            if not isinstance(main_label, str) or not main_label.strip():
+                return jsonify({"status": "invalid main_label", "error": "main_label must be a non-empty string"}), 400
+
+            for idx, item in enumerate(result):
                 if "label" not in item or "value" not in item:
-                    return jsonify({"status": "…
+                    return jsonify({"status": "invalid result item", "error": f"Item {idx} missing 'label' or 'value'"}), 400
 
+                label_values = item["label"]
+                value = item["value"]
+
+                if not isinstance(label_values, list) or not all(isinstance(lv, str) for lv in label_values):
+                    return jsonify({"status": "invalid label", "error": f"Item {idx} 'label' must be list of strings"}), 400
+
+                if len(label_values) != len(labels):
+                    return jsonify({
+                        "status": "label count mismatch",
+                        "error": f"Item {idx} label count ({len(label_values)}) does not match labels length ({len(labels)})"
+                    }), 400
+
+                try:
+                    float(value)  # Validate numeric value
+                except (ValueError, TypeError):
+                    return jsonify({"status": "invalid value", "error": f"Item {idx} value must be numeric"}), 400
+
+            # If all checks pass, queue the metric
             metric_queue.put(data)
-            print(…
+            print("Queued:", data)
+
             return jsonify({"status": "success"}), 200
 
-        except Exception:
-            return jsonify({"status": "…
+        except Exception as e:
+            return jsonify({"status": "error", "message": str(e)}), 500
 
 
 def run_flask(host, port):
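A sketch of a client call that satisfies the stricter validation in `update_metric`; the host, port, and use of the `requests` library are assumptions for illustration, and the payload mirrors the curl example further down:

```python
# Illustrative only: payload shape accepted by the /update_metric endpoint.
import requests

payload = {
    "result": [{"label": ["cpu", "core_1"], "value": 42.5}],
    "labels": ["metric_type", "core"],
    "main_label": "cpu_usage_metric",
}
resp = requests.post("http://localhost:5000/update_metric", json=payload, timeout=5)
print(resp.status_code, resp.json())  # expect 200 and {"status": "success"} when the payload passes validation
```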
{flexmetric-0.3.3.dist-info → flexmetric-0.4.1.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: flexmetric
-Version: 0.3.3
+Version: 0.4.1
 Summary: A secure flexible Prometheus exporter for commands, databases, functions, and scripts.
 Home-page: https://github.com/nikhillingadhal1999/flexmetric
 Author: Nikhil Lingadhal
@@ -138,15 +138,18 @@ https://localhost:5000/update_metric
 
 ### Submitting a Metric to the Flask API
 ```bash
-curl -k -X POST https://localhost:5000/update_metric \
+curl -X POST http://localhost:5000/update_metric \
   -H "Content-Type: application/json" \
   -d '{
        "result": [
-         { …
+         {
+           "label": ["cpu", "core_1"],
+           "value": 42.5
+         }
        ],
-       "labels": ["…
+       "labels": ["metric_type", "core"],
+       "main_label": "cpu_usage_metric"
      }'
- …
 ```
 
 ### commands.yaml
@@ -155,8 +158,9 @@ curl -k -X POST https://localhost:5000/update_metric \
 commands:
   - name: disk_usage
     command: df -h
- …
- …
+    main_label: disk_usage_filesystem_mount_point
+    labels: ["filesystem", "mounted"]
+    label_columns: [0, -1]
     value_column: 4
     timeout_seconds: 60
 ```
@@ -169,15 +173,15 @@ Filesystem Size Used Avail Use% Mounted on
 ```
 ## Fields description
 
- …
- …
- …
-| `…
-| `…
-| `…
-| `…
-| `value_column` …
-| `timeout_seconds…
+| Field | Description |
+|-------------------|----------------------------------------------------------------------------------------------------------------------------|
+| `name` | A **nickname** you give to this check. It's just for your reference to know what this command is doing (e.g., `"disk_usage"`). |
+| `command` | The **actual shell command** to run (e.g., `"df -h"`). It fetches the data you want to monitor. |
+| `main_label` | The **metric name** that will appear in Prometheus. This is what you will query to see the metric values. |
+| `labels` | A list of **label names** used to describe different dimensions of the metric (e.g., `["filesystem", "mounted"]`). |
+| `label_columns` | A list of **column indexes** from the command’s output to extract the label values (e.g., `[0, -1]` for first and last column). |
+| `value_column` | The **column index** from the command's output to extract the **numeric value** (the actual metric value, e.g., disk usage). |
+| `timeout_seconds` | Maximum time (in seconds) to wait for the command to complete. If it exceeds this time, the command is aborted. |
 
 ## Database mode
 
@@ -188,13 +192,13 @@ databases:
     db_connection: /path/to/my.db
 ````
 ```yaml
- …
+commands:
   - name: user_count
- …
-    db_name: mydb
+    database: userdb
     query: "SELECT COUNT(*) FROM users;"
- …
- …
+    main_label: database_user_count
+    labels: ["database_name", "table_name"]
+    label_values: ["userdb", "users"]
 ```
 ## Functions mode
 
@@ -211,9 +215,10 @@ When using the `--functions` mode, each Python function you define is expected t
 ```python
 {
     'result': [
-        {'label': …
+        { 'label': [label_value1, label_value2, ...], 'value': numeric_value }
     ],
-    'labels': […
+    'labels': [label_name1, label_name2, ...],
+    'main_label': 'your_main_metric_name'
 }
 ```
 
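A minimal sketch of a function for `--functions` mode that returns the documented structure; the function name and the values it reports are invented for illustration:

```python
# Illustrative only: returns the dict shape documented above.
def user_table_count():
    return {
        "result": [{"label": ["userdb", "users"], "value": 123}],
        "labels": ["database_name", "table_name"],
        "main_label": "database_user_count",
    }

print(user_table_count())
```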
{flexmetric-0.3.3.dist-info → flexmetric-0.4.1.dist-info}/RECORD

@@ -6,13 +6,13 @@ flexmetric/file_recognition/exec_file.py,sha256=9wBbnqPConxtLhqWTgigEr8VQG-fa9K_
 flexmetric/logging_module/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 flexmetric/logging_module/logger.py,sha256=hXj9m2Q_KxJVI5YRHRoK6PXV5tO6NmvuVjq2Ipx_1tE,447
 flexmetric/metric_process/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-flexmetric/metric_process/database_processing.py,sha256=…
+flexmetric/metric_process/database_processing.py,sha256=hbVbzIdO21NF0F3nILJ4d1x8fkks9P8s-wtn8qZ91qw,2739
 flexmetric/metric_process/expiring_queue.py,sha256=1oC0MjloitPiRo7yDgVarz81ETEQavKI_W-GFUvp5_Y,1920
-flexmetric/metric_process/process_commands.py,sha256=…
-flexmetric/metric_process/prometheus_agent.py,sha256=…
-flexmetric/metric_process/views.py,sha256=…
-flexmetric-0.…
-flexmetric-0.…
-flexmetric-0.…
-flexmetric-0.…
-flexmetric-0.…
+flexmetric/metric_process/process_commands.py,sha256=clGMQhLNcuJUO1gElpAS9Dyk0KU5w41DIguczjo7ceA,4089
+flexmetric/metric_process/prometheus_agent.py,sha256=rZ9pQrqA3J_-sJtl48qkFIHIUH01iert05u4SmGN4Yc,7714
+flexmetric/metric_process/views.py,sha256=BY695dCpTkJRc1KLC9RNpFTieFdHeHvyqyefmHhMauE,3297
+flexmetric-0.4.1.dist-info/METADATA,sha256=kHQheeo9qUgtJD6Zyb8O7q6eZR8RTCs7DJXHDJ8Yfrc,10570
+flexmetric-0.4.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+flexmetric-0.4.1.dist-info/entry_points.txt,sha256=urVePn5EWr3JqNvkYP7OsB_h2_Bqvv-Wq1MJRBexm8A,79
+flexmetric-0.4.1.dist-info/top_level.txt,sha256=zBlrNwKhXUNhgu9RRZnXxYwYnmE-eocRe6wKSmQROA4,11
+flexmetric-0.4.1.dist-info/RECORD,,
{flexmetric-0.3.3.dist-info → flexmetric-0.4.1.dist-info}/WHEEL: file without changes
{flexmetric-0.3.3.dist-info → flexmetric-0.4.1.dist-info}/entry_points.txt: file without changes
{flexmetric-0.3.3.dist-info → flexmetric-0.4.1.dist-info}/top_level.txt: file without changes