flexmetric 0.3.3__tar.gz → 0.4.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24) hide show
  1. {flexmetric-0.3.3 → flexmetric-0.4.1}/PKG-INFO +28 -23
  2. {flexmetric-0.3.3 → flexmetric-0.4.1}/README.md +27 -22
  3. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/metric_process/database_processing.py +16 -14
  4. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/metric_process/process_commands.py +23 -7
  5. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/metric_process/prometheus_agent.py +24 -13
  6. flexmetric-0.4.1/flexmetric/metric_process/views.py +76 -0
  7. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric.egg-info/PKG-INFO +28 -23
  8. {flexmetric-0.3.3 → flexmetric-0.4.1}/setup.py +1 -1
  9. flexmetric-0.3.3/flexmetric/metric_process/views.py +0 -38
  10. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/__init__.py +0 -0
  11. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/config/__init__.py +0 -0
  12. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/config/configuration.py +0 -0
  13. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/file_recognition/__init__.py +0 -0
  14. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/file_recognition/exec_file.py +0 -0
  15. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/logging_module/__init__.py +0 -0
  16. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/logging_module/logger.py +0 -0
  17. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/metric_process/__init__.py +0 -0
  18. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric/metric_process/expiring_queue.py +0 -0
  19. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric.egg-info/SOURCES.txt +0 -0
  20. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric.egg-info/dependency_links.txt +0 -0
  21. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric.egg-info/entry_points.txt +0 -0
  22. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric.egg-info/requires.txt +0 -0
  23. {flexmetric-0.3.3 → flexmetric-0.4.1}/flexmetric.egg-info/top_level.txt +0 -0
  24. {flexmetric-0.3.3 → flexmetric-0.4.1}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: flexmetric
3
- Version: 0.3.3
3
+ Version: 0.4.1
4
4
  Summary: A secure flexible Prometheus exporter for commands, databases, functions, and scripts.
5
5
  Home-page: https://github.com/nikhillingadhal1999/flexmetric
6
6
  Author: Nikhil Lingadhal
@@ -138,15 +138,18 @@ https://localhost:5000/update_metric
138
138
 
139
139
  ### Submitting a Metric to the Flask API
140
140
  ```bash
141
- curl -k -X POST https://localhost:5000/update_metric \
141
+ curl -X POST http://localhost:5000/update_metric \
142
142
  -H "Content-Type: application/json" \
143
143
  -d '{
144
144
  "result": [
145
- { "label": "cpu_usage", "value": 42.5 }
145
+ {
146
+ "label": ["cpu", "core_1"],
147
+ "value": 42.5
148
+ }
146
149
  ],
147
- "labels": ["cpu"]
150
+ "labels": ["metric_type", "core"],
151
+ "main_label": "cpu_usage_metric"
148
152
  }'
149
-
150
153
  ```
151
154
 
152
155
  ### commands.yaml
@@ -155,8 +158,9 @@ curl -k -X POST https://localhost:5000/update_metric \
155
158
  commands:
156
159
  - name: disk_usage
157
160
  command: df -h
158
- label: path
159
- label_column: -1
161
+ main_label: disk_usage_filesystem_mount_point
162
+ labels: ["filesystem", "mounted"]
163
+ label_columns: [0, -1]
160
164
  value_column: 4
161
165
  timeout_seconds: 60
162
166
  ```
@@ -169,15 +173,15 @@ Filesystem Size Used Avail Use% Mounted on
169
173
  ```
170
174
  ## Fields description
171
175
 
172
-
173
- | Field | Description |
174
- |------------------|-----------------------------------------------------------------------------|
175
- | `name` | The **name** of the metric (`disk_usage`). This is the metric name seen in Prometheus. |
176
- | `command` | The **shell command** to execute (`df -h` in this case). |
177
- | `label` | The **label name** to attach to the metric (`path`). |
178
- | `label_column` | The **column index** from the command's output to use as the label. Here `-1` means the **last column** (typically the mount path in `df -h`). |
179
- | `value_column` | The **column index** from the command's output to extract the **numeric value**. Here `4` refers to the fifth column, which is usually the **Use%** in `df -h`. |
180
- | `timeout_seconds`| How long (in seconds) to wait for the command before timing out. Here it's set to `60` seconds. |
176
+ | Field | Description |
177
+ |-------------------|----------------------------------------------------------------------------------------------------------------------------|
178
+ | `name` | A **nickname** you give to this check. It's just for your reference to know what this command is doing (e.g., `"disk_usage"`). |
179
+ | `command` | The **actual shell command** to run (e.g., `"df -h"`). It fetches the data you want to monitor. |
180
+ | `main_label` | The **metric name** that will appear in Prometheus. This is what you will query to see the metric values. |
181
+ | `labels` | A list of **label names** used to describe different dimensions of the metric (e.g., `["filesystem", "mounted"]`). |
182
+ | `label_columns`   | A list of **column indexes** from the command's output to extract the label values (e.g., `[0, -1]` for first and last column). |
183
+ | `value_column` | The **column index** from the command's output to extract the **numeric value** (the actual metric value, e.g., disk usage). |
184
+ | `timeout_seconds` | Maximum time (in seconds) to wait for the command to complete. If it exceeds this time, the command is aborted. |
181
185
 
182
186
  ## Database mode
183
187
 
@@ -188,13 +192,13 @@ databases:
188
192
  db_connection: /path/to/my.db
189
193
  ````
190
194
  ```yaml
191
- queries:
195
+ commands:
192
196
  - name: user_count
193
- db_type: sqlite
194
- db_name: mydb
197
+ database: userdb
195
198
  query: "SELECT COUNT(*) FROM users;"
196
- label: table
197
- label_value: users
199
+ main_label: database_user_count
200
+ labels: ["database_name", "table_name"]
201
+ label_values: ["userdb", "users"]
198
202
  ```
199
203
  ## Functions mode
200
204
 
@@ -211,9 +215,10 @@ When using the `--functions` mode, each Python function you define is expected t
211
215
  ```python
212
216
  {
213
217
  'result': [
214
- {'label': <label_or_labels>, 'value': <numeric_value>}
218
+ { 'label': [label_value1, label_value2, ...], 'value': numeric_value }
215
219
  ],
216
- 'labels': [<label_name_1>]
220
+ 'labels': [label_name1, label_name2, ...],
221
+ 'main_label': 'your_main_metric_name'
217
222
  }
218
223
  ```
219
224
 
@@ -105,15 +105,18 @@ https://localhost:5000/update_metric
105
105
 
106
106
  ### Submitting a Metric to the Flask API
107
107
  ```bash
108
- curl -k -X POST https://localhost:5000/update_metric \
108
+ curl -X POST http://localhost:5000/update_metric \
109
109
  -H "Content-Type: application/json" \
110
110
  -d '{
111
111
  "result": [
112
- { "label": "cpu_usage", "value": 42.5 }
112
+ {
113
+ "label": ["cpu", "core_1"],
114
+ "value": 42.5
115
+ }
113
116
  ],
114
- "labels": ["cpu"]
117
+ "labels": ["metric_type", "core"],
118
+ "main_label": "cpu_usage_metric"
115
119
  }'
116
-
117
120
  ```
118
121
 
119
122
  ### commands.yaml
@@ -122,8 +125,9 @@ curl -k -X POST https://localhost:5000/update_metric \
122
125
  commands:
123
126
  - name: disk_usage
124
127
  command: df -h
125
- label: path
126
- label_column: -1
128
+ main_label: disk_usage_filesystem_mount_point
129
+ labels: ["filesystem", "mounted"]
130
+ label_columns: [0, -1]
127
131
  value_column: 4
128
132
  timeout_seconds: 60
129
133
  ```
@@ -136,15 +140,15 @@ Filesystem Size Used Avail Use% Mounted on
136
140
  ```
137
141
  ## Fields description
138
142
 
139
-
140
- | Field | Description |
141
- |------------------|-----------------------------------------------------------------------------|
142
- | `name` | The **name** of the metric (`disk_usage`). This is the metric name seen in Prometheus. |
143
- | `command` | The **shell command** to execute (`df -h` in this case). |
144
- | `label` | The **label name** to attach to the metric (`path`). |
145
- | `label_column` | The **column index** from the command's output to use as the label. Here `-1` means the **last column** (typically the mount path in `df -h`). |
146
- | `value_column` | The **column index** from the command's output to extract the **numeric value**. Here `4` refers to the fifth column, which is usually the **Use%** in `df -h`. |
147
- | `timeout_seconds`| How long (in seconds) to wait for the command before timing out. Here it's set to `60` seconds. |
143
+ | Field | Description |
144
+ |-------------------|----------------------------------------------------------------------------------------------------------------------------|
145
+ | `name` | A **nickname** you give to this check. It's just for your reference to know what this command is doing (e.g., `"disk_usage"`). |
146
+ | `command` | The **actual shell command** to run (e.g., `"df -h"`). It fetches the data you want to monitor. |
147
+ | `main_label` | The **metric name** that will appear in Prometheus. This is what you will query to see the metric values. |
148
+ | `labels` | A list of **label names** used to describe different dimensions of the metric (e.g., `["filesystem", "mounted"]`). |
149
+ | `label_columns`   | A list of **column indexes** from the command's output to extract the label values (e.g., `[0, -1]` for first and last column). |
150
+ | `value_column` | The **column index** from the command's output to extract the **numeric value** (the actual metric value, e.g., disk usage). |
151
+ | `timeout_seconds` | Maximum time (in seconds) to wait for the command to complete. If it exceeds this time, the command is aborted. |
148
152
 
149
153
  ## Database mode
150
154
 
@@ -155,13 +159,13 @@ databases:
155
159
  db_connection: /path/to/my.db
156
160
  ````
157
161
  ```yaml
158
- queries:
162
+ commands:
159
163
  - name: user_count
160
- db_type: sqlite
161
- db_name: mydb
164
+ database: userdb
162
165
  query: "SELECT COUNT(*) FROM users;"
163
- label: table
164
- label_value: users
166
+ main_label: database_user_count
167
+ labels: ["database_name", "table_name"]
168
+ label_values: ["userdb", "users"]
165
169
  ```
166
170
  ## Functions mode
167
171
 
@@ -178,9 +182,10 @@ When using the `--functions` mode, each Python function you define is expected t
178
182
  ```python
179
183
  {
180
184
  'result': [
181
- {'label': <label_or_labels>, 'value': <numeric_value>}
185
+ { 'label': [label_value1, label_value2, ...], 'value': numeric_value }
182
186
  ],
183
- 'labels': [<label_name_1>]
187
+ 'labels': [label_name1, label_name2, ...],
188
+ 'main_label': 'your_main_metric_name'
184
189
  }
185
190
  ```
186
191
 
@@ -60,8 +60,10 @@ def process_database_queries(queries_file, databases_file):
60
60
 
61
61
  db_path = db_conf["db_connection"]
62
62
  query = cmd["query"]
63
- label = cmd["label"]
64
- label_value = cmd["label_value"]
63
+ labels = cmd.get('labels', [])
64
+ label_values = cmd.get('label_values', [])
65
+ main_label = cmd.get('main_label', 'default_db_metric')
66
+
65
67
  # check if query is safe
66
68
  if is_safe_query(query):
67
69
  value = execute_sqlite_query(db_path, query)
@@ -69,20 +71,20 @@ def process_database_queries(queries_file, databases_file):
69
71
  print(f"[WARN] Unsupported query type: {query}")
70
72
  return None
71
73
 
72
- if value is not None:
73
- result = {
74
- "result": [{"label": label_value, "value": value}],
75
- "labels": [label],
76
- }
77
- all_results.append(result)
78
- else:
79
- print(
80
- f"[INFO] No result for command '{cmd['name']}' on database '{cmd['database']}'"
81
- )
82
-
74
+ if not isinstance(label_values, list):
75
+ label_values = [label_values]
76
+
77
+ result = {
78
+ 'result': [{
79
+ 'label': label_values,
80
+ 'value': value
81
+ }],
82
+ 'labels': labels,
83
+ 'main_label': main_label
84
+ }
85
+ all_results.append(result)
83
86
  except Exception as e:
84
87
  print(
85
88
  f"[ERROR] Processing command '{cmd.get('name', 'unknown')}' failed: {e}"
86
89
  )
87
-
88
90
  return all_results
@@ -55,24 +55,40 @@ def parse_command_output(raw_output, label_column, value_column, fixed_label_val
55
55
 
56
56
  def process_single_command(cmd_info):
57
57
  command = cmd_info['command']
58
- label_name = cmd_info['label']
59
58
  timeout = cmd_info.get('timeout_seconds', 30)
60
- label_column = cmd_info.get('label_column', -1)
59
+ labels = cmd_info.get('labels', [])
60
+ label_columns = cmd_info.get('label_columns', [])
61
61
  value_column = cmd_info.get('value_column', 0)
62
- fixed_label_value = cmd_info.get('label_value')
62
+ main_label = cmd_info.get('main_label', 'default_metric')
63
63
 
64
64
  raw_output = execute_command_with_timeout(command, timeout)
65
- if raw_output == '':
65
+ if not raw_output:
66
66
  logger.warning(f"No results for command {command}")
67
67
  return None
68
68
 
69
- result_list = parse_command_output(raw_output, label_column, value_column, fixed_label_value)
70
-
69
+ lines = raw_output.strip().split('\n')
70
+ if not lines:
71
+ logger.error(f"No valid lines returned from command: {command}")
72
+ return None
73
+ result_list = []
74
+ for line in lines:
75
+ parts = line.split()
76
+ try:
77
+ label_columns_value = []
78
+ for value in label_columns:
79
+ label_columns_value.append(parts[value])
80
+ result_list.append({ "label": label_columns_value, "value": parts[value_column] })
81
+ except Exception as e:
82
+ logger.error(f"Error parsing line: '{line}' → {e}")
83
+ continue
71
84
  return {
72
85
  'result': result_list,
73
- 'labels': [label_name]
86
+ 'labels': labels,
87
+ 'main_label': main_label
74
88
  }
75
89
 
90
+
91
+
76
92
  def is_command_safe(command):
77
93
  blacklist = ['rm', 'reboot', 'shutdown', 'halt', 'poweroff', 'mkfs', 'dd']
78
94
  for dangerous_cmd in blacklist:
@@ -190,27 +190,37 @@ def measure(args):
190
190
  if cmd_results != None:
191
191
  exec_result.extend(cmd_results)
192
192
  global gauges
193
+
193
194
  for data in exec_result:
194
195
  results = data["result"]
195
196
  labels = data["labels"]
196
- gauge_name = "_".join(labels).lower() + "_gauge"
197
- # print(labels)
197
+ main_label_value = data.get("main_label", "default_main")
198
+ gauge_name = main_label_value.lower() + "_gauge"
199
+
198
200
  if gauge_name not in gauges:
199
201
  gauge = Gauge(gauge_name, f"{gauge_name} for different metrics", labels)
200
202
  gauges[gauge_name] = gauge
201
203
  else:
202
204
  gauge = gauges[gauge_name]
205
+
203
206
  for result in results:
204
- if isinstance(result["label"], str):
205
- try:
206
- gauge.labels(result["label"]).set(
207
- convert_to_data_type(result["value"])
208
- )
209
- except Exception as ex:
210
- logger.error("Cannot pass string")
211
- elif isinstance(result["label"], list):
212
- label_dict = dict(zip(labels, result["label"]))
207
+ label_values = result["label"]
208
+
209
+ if not isinstance(label_values, list):
210
+ # Automatically wrap single label into list for consistency
211
+ label_values = [label_values]
212
+
213
+ if len(label_values) != len(labels):
214
+ logger.error(f"Label mismatch: expected {len(labels)} values but got {len(label_values)}")
215
+ continue
216
+
217
+ label_dict = dict(zip(labels, label_values))
218
+ # print(label_dict)
219
+
220
+ try:
213
221
  gauge.labels(**label_dict).set(convert_to_data_type(result["value"]))
222
+ except Exception as ex:
223
+ logger.error(f"Failed to set gauge for labels {label_dict}: {ex}")
214
224
 
215
225
 
216
226
  def scheduled_measure(args):
@@ -244,5 +254,6 @@ def main():
244
254
  secure_flask_run(args)
245
255
  else:
246
256
  run_flask(args.host, args.port)
247
-
248
- main()
257
+ # # args = arguments()
258
+ # # measure(args)
259
+ # main()
@@ -0,0 +1,76 @@
1
+ from flask import Flask, request, jsonify, Response
2
+ from flexmetric.metric_process.expiring_queue import metric_queue
3
+ import argparse
4
+ from prometheus_client import generate_latest, REGISTRY, CONTENT_TYPE_LATEST
5
+
6
+ app = Flask(__name__)
7
+
8
+
9
+ @app.route('/metrics')
10
+ def metrics():
11
+ return Response(generate_latest(REGISTRY), mimetype=CONTENT_TYPE_LATEST)
12
+
13
+
14
+ def add_update_metric_route():
15
+ @app.route("/update_metric", methods=["POST"])
16
+ def update_metric():
17
+ try:
18
+ data = request.get_json(force=True)
19
+
20
+ # Top-level validation
21
+ if not isinstance(data, dict):
22
+ return jsonify({"status": "invalid structure", "error": "Top-level JSON must be an object"}), 400
23
+
24
+ required_keys = {"result", "labels", "main_label"}
25
+ if not required_keys.issubset(data):
26
+ return jsonify({"status": "invalid structure", "error": f"Missing keys: {required_keys - set(data)}"}), 400
27
+
28
+ result = data.get("result")
29
+ labels = data.get("labels")
30
+ main_label = data.get("main_label")
31
+
32
+ # Type validation
33
+ if not isinstance(result, list) or not all(isinstance(item, dict) for item in result):
34
+ return jsonify({"status": "invalid result", "error": "Result must be a list of dictionaries"}), 400
35
+
36
+ if not isinstance(labels, list) or not all(isinstance(label, str) for label in labels):
37
+ return jsonify({"status": "invalid labels", "error": "Labels must be a list of strings"}), 400
38
+
39
+ if not isinstance(main_label, str) or not main_label.strip():
40
+ return jsonify({"status": "invalid main_label", "error": "main_label must be a non-empty string"}), 400
41
+
42
+ for idx, item in enumerate(result):
43
+ if "label" not in item or "value" not in item:
44
+ return jsonify({"status": "invalid result item", "error": f"Item {idx} missing 'label' or 'value'"}), 400
45
+
46
+ label_values = item["label"]
47
+ value = item["value"]
48
+
49
+ if not isinstance(label_values, list) or not all(isinstance(lv, str) for lv in label_values):
50
+ return jsonify({"status": "invalid label", "error": f"Item {idx} 'label' must be list of strings"}), 400
51
+
52
+ if len(label_values) != len(labels):
53
+ return jsonify({
54
+ "status": "label count mismatch",
55
+ "error": f"Item {idx} label count ({len(label_values)}) does not match labels length ({len(labels)})"
56
+ }), 400
57
+
58
+ try:
59
+ float(value) # Validate numeric value
60
+ except (ValueError, TypeError):
61
+ return jsonify({"status": "invalid value", "error": f"Item {idx} value must be numeric"}), 400
62
+
63
+ # If all checks pass, queue the metric
64
+ metric_queue.put(data)
65
+ print("Queued:", data)
66
+
67
+ return jsonify({"status": "success"}), 200
68
+
69
+ except Exception as e:
70
+ return jsonify({"status": "error", "message": str(e)}), 500
71
+
72
+
73
+ def run_flask(host, port):
74
+ app.run(host=host, port=port)
75
+ def secure_flask_run(args):
76
+ app.run(host=args.host, port=args.port, ssl_context=(args.ssl_cert, args.ssl_key))
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: flexmetric
3
- Version: 0.3.3
3
+ Version: 0.4.1
4
4
  Summary: A secure flexible Prometheus exporter for commands, databases, functions, and scripts.
5
5
  Home-page: https://github.com/nikhillingadhal1999/flexmetric
6
6
  Author: Nikhil Lingadhal
@@ -138,15 +138,18 @@ https://localhost:5000/update_metric
138
138
 
139
139
  ### Submitting a Metric to the Flask API
140
140
  ```bash
141
- curl -k -X POST https://localhost:5000/update_metric \
141
+ curl -X POST http://localhost:5000/update_metric \
142
142
  -H "Content-Type: application/json" \
143
143
  -d '{
144
144
  "result": [
145
- { "label": "cpu_usage", "value": 42.5 }
145
+ {
146
+ "label": ["cpu", "core_1"],
147
+ "value": 42.5
148
+ }
146
149
  ],
147
- "labels": ["cpu"]
150
+ "labels": ["metric_type", "core"],
151
+ "main_label": "cpu_usage_metric"
148
152
  }'
149
-
150
153
  ```
151
154
 
152
155
  ### commands.yaml
@@ -155,8 +158,9 @@ curl -k -X POST https://localhost:5000/update_metric \
155
158
  commands:
156
159
  - name: disk_usage
157
160
  command: df -h
158
- label: path
159
- label_column: -1
161
+ main_label: disk_usage_filesystem_mount_point
162
+ labels: ["filesystem", "mounted"]
163
+ label_columns: [0, -1]
160
164
  value_column: 4
161
165
  timeout_seconds: 60
162
166
  ```
@@ -169,15 +173,15 @@ Filesystem Size Used Avail Use% Mounted on
169
173
  ```
170
174
  ## Fields description
171
175
 
172
-
173
- | Field | Description |
174
- |------------------|-----------------------------------------------------------------------------|
175
- | `name` | The **name** of the metric (`disk_usage`). This is the metric name seen in Prometheus. |
176
- | `command` | The **shell command** to execute (`df -h` in this case). |
177
- | `label` | The **label name** to attach to the metric (`path`). |
178
- | `label_column` | The **column index** from the command's output to use as the label. Here `-1` means the **last column** (typically the mount path in `df -h`). |
179
- | `value_column` | The **column index** from the command's output to extract the **numeric value**. Here `4` refers to the fifth column, which is usually the **Use%** in `df -h`. |
180
- | `timeout_seconds`| How long (in seconds) to wait for the command before timing out. Here it's set to `60` seconds. |
176
+ | Field | Description |
177
+ |-------------------|----------------------------------------------------------------------------------------------------------------------------|
178
+ | `name` | A **nickname** you give to this check. It's just for your reference to know what this command is doing (e.g., `"disk_usage"`). |
179
+ | `command` | The **actual shell command** to run (e.g., `"df -h"`). It fetches the data you want to monitor. |
180
+ | `main_label` | The **metric name** that will appear in Prometheus. This is what you will query to see the metric values. |
181
+ | `labels` | A list of **label names** used to describe different dimensions of the metric (e.g., `["filesystem", "mounted"]`). |
182
+ | `label_columns`   | A list of **column indexes** from the command's output to extract the label values (e.g., `[0, -1]` for first and last column). |
183
+ | `value_column` | The **column index** from the command's output to extract the **numeric value** (the actual metric value, e.g., disk usage). |
184
+ | `timeout_seconds` | Maximum time (in seconds) to wait for the command to complete. If it exceeds this time, the command is aborted. |
181
185
 
182
186
  ## Database mode
183
187
 
@@ -188,13 +192,13 @@ databases:
188
192
  db_connection: /path/to/my.db
189
193
  ````
190
194
  ```yaml
191
- queries:
195
+ commands:
192
196
  - name: user_count
193
- db_type: sqlite
194
- db_name: mydb
197
+ database: userdb
195
198
  query: "SELECT COUNT(*) FROM users;"
196
- label: table
197
- label_value: users
199
+ main_label: database_user_count
200
+ labels: ["database_name", "table_name"]
201
+ label_values: ["userdb", "users"]
198
202
  ```
199
203
  ## Functions mode
200
204
 
@@ -211,9 +215,10 @@ When using the `--functions` mode, each Python function you define is expected t
211
215
  ```python
212
216
  {
213
217
  'result': [
214
- {'label': <label_or_labels>, 'value': <numeric_value>}
218
+ { 'label': [label_value1, label_value2, ...], 'value': numeric_value }
215
219
  ],
216
- 'labels': [<label_name_1>]
220
+ 'labels': [label_name1, label_name2, ...],
221
+ 'main_label': 'your_main_metric_name'
217
222
  }
218
223
  ```
219
224
 
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
2
2
 
3
3
  setup(
4
4
  name="flexmetric",
5
- version="0.3.3",
5
+ version="0.4.1",
6
6
  author="Nikhil Lingadhal",
7
7
  description="A secure flexible Prometheus exporter for commands, databases, functions, and scripts.",
8
8
  long_description=open("README.md").read(),
@@ -1,38 +0,0 @@
1
- from flask import Flask, request, jsonify, Response
2
- from flexmetric.metric_process.expiring_queue import metric_queue
3
- import argparse
4
- from prometheus_client import generate_latest, REGISTRY, CONTENT_TYPE_LATEST
5
-
6
- app = Flask(__name__)
7
-
8
-
9
- @app.route('/metrics')
10
- def metrics():
11
- return Response(generate_latest(REGISTRY), mimetype=CONTENT_TYPE_LATEST)
12
-
13
-
14
- def add_update_metric_route():
15
- @app.route("/update_metric", methods=["POST"])
16
- def update_metric():
17
- try:
18
- data = request.get_json()
19
-
20
- if not data or "result" not in data or not isinstance(data["result"], list):
21
- return jsonify({"status": "no"}), 400
22
-
23
- for item in data["result"]:
24
- if "label" not in item or "value" not in item:
25
- return jsonify({"status": "no"}), 400
26
-
27
- metric_queue.put(data)
28
- print(metric_queue)
29
- return jsonify({"status": "success"}), 200
30
-
31
- except Exception:
32
- return jsonify({"status": "no"}), 500
33
-
34
-
35
- def run_flask(host, port):
36
- app.run(host=host, port=port)
37
- def secure_flask_run(args):
38
- app.run(host=args.host, port=args.port, ssl_context=(args.ssl_cert, args.ssl_key))
File without changes