catocli-3.0.18-py3-none-any.whl → catocli-3.0.24-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of catocli might be problematic.
- catocli/Utils/clidriver.py +16 -8
- catocli/Utils/formatter_account_metrics.py +544 -0
- catocli/Utils/formatter_app_stats.py +184 -0
- catocli/Utils/formatter_app_stats_timeseries.py +377 -0
- catocli/Utils/formatter_events_timeseries.py +459 -0
- catocli/Utils/formatter_socket_port_metrics.py +189 -0
- catocli/Utils/formatter_socket_port_metrics_timeseries.py +339 -0
- catocli/Utils/formatter_utils.py +251 -0
- catocli/__init__.py +1 -1
- catocli/clisettings.json +37 -5
- catocli/parsers/customParserApiClient.py +211 -66
- catocli/parsers/mutation_policy/__init__.py +405 -405
- catocli/parsers/mutation_site/__init__.py +15 -15
- catocli/parsers/mutation_sites/__init__.py +15 -15
- catocli/parsers/query_accountMetrics/README.md +90 -0
- catocli/parsers/query_accountMetrics/__init__.py +6 -0
- catocli/parsers/query_appStats/README.md +2 -2
- catocli/parsers/query_appStats/__init__.py +4 -2
- catocli/parsers/query_appStatsTimeSeries/__init__.py +4 -2
- catocli/parsers/query_eventsTimeSeries/README.md +280 -0
- catocli/parsers/query_eventsTimeSeries/__init__.py +6 -0
- catocli/parsers/query_policy/__init__.py +42 -42
- catocli/parsers/query_socketPortMetrics/README.md +44 -0
- catocli/parsers/query_socketPortMetrics/__init__.py +6 -0
- catocli/parsers/query_socketPortMetricsTimeSeries/README.md +83 -0
- catocli/parsers/query_socketPortMetricsTimeSeries/__init__.py +4 -2
- catocli/parsers/utils/export_utils.py +6 -2
- catocli-3.0.24.dist-info/METADATA +184 -0
- {catocli-3.0.18.dist-info → catocli-3.0.24.dist-info}/RECORD +37 -35
- {catocli-3.0.18.dist-info → catocli-3.0.24.dist-info}/top_level.txt +0 -1
- models/mutation.xdr.analystFeedback.json +822 -87
- models/query.xdr.stories.json +822 -87
- models/query.xdr.story.json +822 -87
- schema/catolib.py +89 -64
- catocli/Utils/csv_formatter.py +0 -663
- catocli-3.0.18.dist-info/METADATA +0 -124
- scripts/catolib.py +0 -62
- scripts/export_if_rules_to_json.py +0 -188
- scripts/export_wf_rules_to_json.py +0 -111
- scripts/import_wf_rules_to_tfstate.py +0 -331
- {catocli-3.0.18.dist-info → catocli-3.0.24.dist-info}/WHEEL +0 -0
- {catocli-3.0.18.dist-info → catocli-3.0.24.dist-info}/entry_points.txt +0 -0
- {catocli-3.0.18.dist-info → catocli-3.0.24.dist-info}/licenses/LICENSE +0 -0
catocli/Utils/formatter_socket_port_metrics_timeseries.py
ADDED
@@ -0,0 +1,339 @@
+#!/usr/bin/env python3
+"""
+Socket Port Metrics Timeseries Formatter for Cato CLI
+
+This module provides functions to format socketPortMetricsTimeSeries API responses
+into JSON and CSV formats, with special handling for timeseries data
+and unit conversions.
+"""
+
+import csv
+import io
+import json
+from datetime import datetime
+from typing import Dict, List, Any, Tuple
+
+# Import shared utility functions
+try:
+    from .formatter_utils import convert_bytes_to_mb, format_timestamp, is_bytes_measure, parse_label_for_dimensions_and_measure
+except ImportError:
+    try:
+        from catocli.Utils.formatter_utils import convert_bytes_to_mb, format_timestamp, is_bytes_measure, parse_label_for_dimensions_and_measure
+    except ImportError:
+        from formatter_utils import convert_bytes_to_mb, format_timestamp, is_bytes_measure, parse_label_for_dimensions_and_measure
+
+
+def format_socket_port_metrics_timeseries(response_data: Dict[str, Any], output_format: str = 'json') -> str:
+    """
+    Convert socketPortMetricsTimeSeries JSON response to specified format (JSON or CSV)
+
+    Args:
+        response_data: JSON response from socketPortMetricsTimeSeries query
+        output_format: 'json' or 'csv'
+
+    Returns:
+        Formatted string in the requested format, or None if no processable data
+    """
+    if output_format.lower() == 'csv':
+        return _format_socket_port_metrics_timeseries_to_csv(response_data)
+    else:
+        # Default to JSON format with organized structure
+        return _format_socket_port_metrics_timeseries_to_json(response_data)
+
+
+def _format_socket_port_metrics_timeseries_to_json(response_data: Dict[str, Any]) -> str:
+    """
+    Convert socketPortMetricsTimeSeries JSON response to organized JSON format
+
+    Args:
+        response_data: JSON response from socketPortMetricsTimeSeries query
+
+    Returns:
+        JSON formatted string, or None if no processable data
+    """
+    if not response_data or not isinstance(response_data, dict):
+        return None
+
+    # Check for API errors
+    if 'errors' in response_data:
+        return None
+
+    if 'data' not in response_data or 'socketPortMetricsTimeSeries' not in response_data['data']:
+        return None
+
+    socket_metrics_ts = response_data['data']['socketPortMetricsTimeSeries']
+    if socket_metrics_ts is None:
+        return None
+
+    timeseries = socket_metrics_ts.get('timeseries', [])
+
+    if not timeseries:
+        return None
+
+    # Parse measures from labels - these are simpler than appStatsTimeSeries
+    parsed_series = []
+    all_timestamps = set()
+    all_measures = set()
+
+    for series in timeseries:
+        label = series.get('label', '')
+        data_points = series.get('data', [])
+        units = series.get('unitsTimeseries', '')
+        info = series.get('info', [])
+
+        # Extract measure from label - usually just "sum(measure_name)"
+        measure, dimensions = parse_label_for_dimensions_and_measure(label)
+
+        # If no dimensions found in label, create default dimensions from info if available
+        if not dimensions and info:
+            for i, info_value in enumerate(info):
+                dimensions[f'info_{i}'] = str(info_value)
+
+        # If still no dimensions, create a single default dimension
+        if not dimensions:
+            dimensions = {'metric_source': 'socket_port'}
+
+        # Create series entry with safe data parsing
+        data_dict = {}
+        for point in data_points:
+            if isinstance(point, (list, tuple)) and len(point) >= 2:
+                timestamp = int(point[0])
+                value = point[1]
+                data_dict[timestamp] = value
+                all_timestamps.add(timestamp)
+
+        series_entry = {
+            'label': label,
+            'measure': measure,
+            'dimensions': dimensions,
+            'units': units,
+            'data_points': len(data_dict),
+            'time_range': {
+                'start': format_timestamp(min(data_dict.keys())) if data_dict else None,
+                'end': format_timestamp(max(data_dict.keys())) if data_dict else None
+            },
+            'data': data_dict
+        }
+        parsed_series.append(series_entry)
+
+        # Collect metadata
+        all_measures.add(measure)
+
+    # Organize data
+    organized_data = {
+        "socketPortMetricsTimeSeries": {
+            "summary": {
+                "total_series": len(parsed_series),
+                "total_timestamps": len(all_timestamps),
+                "time_range": {
+                    "start": format_timestamp(min(all_timestamps)) if all_timestamps else None,
+                    "end": format_timestamp(max(all_timestamps)) if all_timestamps else None
+                },
+                "measures": sorted(list(all_measures))
+            },
+            "series": []
+        }
+    }
+
+    # Group series by dimension combinations
+    dimension_groups = {}
+    for series in parsed_series:
+        dim_key = tuple(sorted(series['dimensions'].items()))
+        if dim_key not in dimension_groups:
+            dimension_groups[dim_key] = {
+                'dimensions': series['dimensions'],
+                'measures': {},
+                'time_range': series['time_range']
+            }
+        dimension_groups[dim_key]['measures'][series['measure']] = {
+            'label': series['label'],
+            'units': series['units'],
+            'data_points': series['data_points'],
+            'data': series['data']
+        }
+
+    # Convert to organized format with unit conversion
+    for dim_combo, group_data in dimension_groups.items():
+        series_data = {
+            'dimensions': group_data['dimensions'],
+            'time_range': group_data['time_range'],
+            'measures': {}
+        }
+
+        for measure, measure_data in group_data['measures'].items():
+            formatted_data = {}
+            for timestamp, value in measure_data['data'].items():
+                timestamp_str = format_timestamp(timestamp)
+
+                if is_bytes_measure(measure, measure_data['units']) and value:
+                    try:
+                        converted_value = convert_bytes_to_mb(value)
+                        formatted_data[timestamp_str] = {
+                            'value': value,
+                            'formatted_mb': converted_value,
+                            'unit_type': 'bytes'
+                        }
+                    except (ValueError, ZeroDivisionError):
+                        formatted_data[timestamp_str] = {
+                            'value': value,
+                            'unit_type': 'bytes'
+                        }
+                else:
+                    formatted_data[timestamp_str] = {
+                        'value': value,
+                        'unit_type': measure_data['units'] or 'unknown'
+                    }
+
+            series_data['measures'][measure] = {
+                'label': measure_data['label'],
+                'units': measure_data['units'],
+                'data_points': measure_data['data_points'],
+                'data': formatted_data
+            }
+
+        organized_data["socketPortMetricsTimeSeries"]["series"].append(series_data)
+
+    return json.dumps(organized_data, indent=2)
+
+
+def _format_socket_port_metrics_timeseries_to_csv(response_data: Dict[str, Any]) -> str:
+    """
+    Convert socketPortMetricsTimeSeries JSON response to CSV format
+
+    Args:
+        response_data: JSON response from socketPortMetricsTimeSeries query
+
+    Returns:
+        CSV formatted string in long format with one row per timestamp, or None if no processable data
+    """
+    if not response_data or 'data' not in response_data or 'socketPortMetricsTimeSeries' not in response_data['data']:
+        return None
+
+    socket_metrics_ts = response_data['data']['socketPortMetricsTimeSeries']
+    if socket_metrics_ts is None:
+        return None
+
+    timeseries = socket_metrics_ts.get('timeseries', [])
+
+    if not timeseries:
+        return None
+
+    # Parse measures from labels - these are simpler than appStatsTimeSeries
+    # Labels are like: "sum(throughput_downstream)" with no dimensions
+    parsed_series = []
+    all_timestamps = set()
+
+    for series in timeseries:
+        label = series.get('label', '')
+        data_points = series.get('data', [])
+        units = series.get('unitsTimeseries', '')
+        info = series.get('info', [])
+
+        # Extract measure from label - usually just "sum(measure_name)"
+        measure, dimensions = parse_label_for_dimensions_and_measure(label)
+
+        # If no dimensions found in label, create default dimensions from info if available
+        if not dimensions and info:
+            # Info array might contain contextual data like socket/port identifiers
+            for i, info_value in enumerate(info):
+                dimensions[f'info_{i}'] = str(info_value)
+
+        # If still no dimensions, create a single default dimension
+        if not dimensions:
+            dimensions = {'metric_source': 'socket_port'}
+
+        series_entry = {
+            'measure': measure,
+            'dimensions': dimensions,
+            'units': units,
+            'data': {int(point[0]): point[1] for point in data_points if len(point) >= 2}
+        }
+        parsed_series.append(series_entry)
+
+        # Collect all timestamps
+        all_timestamps.update(series_entry['data'].keys())
+
+    # Sort timestamps
+    sorted_timestamps = sorted(all_timestamps)
+
+    # Collect all data in long format (one row per timestamp and dimension combination)
+    rows = []
+
+    # Get all unique dimension combinations
+    dimension_combos = {}
+    for series in parsed_series:
+        dim_key = tuple(sorted(series['dimensions'].items()))
+        if dim_key not in dimension_combos:
+            dimension_combos[dim_key] = {}
+        dimension_combos[dim_key][series['measure']] = {
+            'data': series['data'],
+            'units': series['units']
+        }
+
+    # Create rows for each timestamp and dimension combination
+    for dim_combo, measures_data in dimension_combos.items():
+        dim_dict = dict(dim_combo)
+
+        for timestamp in sorted_timestamps:
+            # Build row data for this timestamp
+            row_data = {
+                'timestamp_period': format_timestamp(timestamp)
+            }
+
+            # Add dimension values
+            for key, value in dim_dict.items():
+                row_data[key] = value
+
+            # Add measure values for this timestamp
+            for measure, measure_info in measures_data.items():
+                value = measure_info['data'].get(timestamp, '')
+                units = measure_info['units']
+
+                # Convert bytes measures to MB and add appropriate suffix
+                if is_bytes_measure(measure, units):
+                    if value:
+                        converted_value = convert_bytes_to_mb(value)
+                        row_data[f'{measure}_mb'] = converted_value
+                    else:
+                        row_data[f'{measure}_mb'] = value
+                else:
+                    row_data[measure] = value
+
+            rows.append(row_data)
+
+    if not rows:
+        return None
+
+    # Create CSV output
+    output = io.StringIO()
+    writer = csv.writer(output)
+
+    # Build header dynamically from all available columns
+    all_columns = set()
+    for row_data in rows:
+        all_columns.update(row_data.keys())
+
+    # Sort columns with timestamp_period first, then dimensions, then measures
+    dimension_columns = []
+    measure_columns = []
+
+    for col in sorted(all_columns):
+        if col == 'timestamp_period':
+            continue  # Will be added first
+        elif col.endswith('_mb') or col in ['throughput_downstream', 'throughput_upstream']:
+            measure_columns.append(col)
+        else:
+            dimension_columns.append(col)
+
+    header = ['timestamp_period'] + sorted(dimension_columns) + sorted(measure_columns)
+    writer.writerow(header)
+
+    # Write data rows
+    for row_data in rows:
+        row = []
+        for col in header:
+            value = row_data.get(col, '')
+            row.append(value)
+        writer.writerow(row)
+
+    return output.getvalue()
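For context, here is a minimal sketch (not part of the wheel) of how the new formatter consumes a socketPortMetricsTimeSeries response. The field names (label, unitsTimeseries, info, and data as [timestamp_ms, value] pairs) follow the keys the module reads above; the sample values are invented.

from catocli.Utils.formatter_socket_port_metrics_timeseries import (
    format_socket_port_metrics_timeseries,
)

# Invented sample response; the shape mirrors what the formatter expects.
response = {
    "data": {
        "socketPortMetricsTimeSeries": {
            "timeseries": [
                {
                    # No " for " clause, so the whole label becomes the measure
                    # and dimensions default to {'metric_source': 'socket_port'}.
                    "label": "sum(throughput_downstream)",
                    "unitsTimeseries": "bytes",
                    "info": [],
                    "data": [[1700000000000, 1048576], [1700000300000, 2097152]],
                }
            ]
        }
    }
}

# CSV path: long format, one row per timestamp_period; the bytes measure
# gets an _mb column via convert_bytes_to_mb.
print(format_socket_port_metrics_timeseries(response, output_format="csv"))
# timestamp_period,metric_source,sum(throughput_downstream)_mb
# 2023-11-14 22:13:20 UTC,socket_port,1
# 2023-11-14 22:18:20 UTC,socket_port,2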
catocli/Utils/formatter_utils.py
ADDED
@@ -0,0 +1,251 @@
+#!/usr/bin/env python3
+"""
+Formatter Utilities for Cato CLI
+
+This module provides shared utility functions and CSV formatting capabilities
+for converting JSON responses from Cato API into various formats.
+
+Shared utilities:
+- convert_bytes_to_mb(): Convert bytes values to megabytes with proper formatting
+- format_timestamp(): Convert timestamps to readable format
+- parse_label_for_dimensions_and_measure(): Parse timeseries labels
+- is_bytes_measure(): Determine if a measure represents bytes data
+
+CSV formatting support:
+- Records grid (appStats): records[] with fieldsMap + fieldsUnitTypes
+- Long-format timeseries (appStatsTimeSeries, socketPortMetricsTimeSeries): timeseries[] with labels (one row per timestamp)
+- Hierarchical timeseries (userMetrics): sites[] → interfaces[] → timeseries[] (one row per timestamp)
+
+All timeseries formatters now use long format (timestamp_period column) for better readability.
+"""
+
+import csv
+import io
+import json
+import re
+from datetime import datetime
+from typing import Dict, List, Any, Optional, Set, Tuple
+
+# Note: The specific formatter functions are imported dynamically
+# in the format_to_csv function to avoid circular imports
+
+
+# Shared Helper Functions
+
+def format_timestamp(timestamp_ms: int) -> str:
+    """
+    Convert timestamp from milliseconds to readable format
+
+    Args:
+        timestamp_ms: Timestamp in milliseconds
+
+    Returns:
+        Formatted timestamp string in UTC
+    """
+    try:
+        # Convert milliseconds to seconds for datetime
+        timestamp_sec = timestamp_ms / 1000
+        dt = datetime.utcfromtimestamp(timestamp_sec)
+        return dt.strftime('%Y-%m-%d %H:%M:%S UTC')
+    except (ValueError, OSError):
+        return str(timestamp_ms)
+
+
+def convert_bytes_to_mb(value: Any) -> str:
+    """
+    Convert bytes value to megabytes with proper formatting
+
+    Args:
+        value: The value to convert (should be numeric)
+
+    Returns:
+        Formatted MB value as string
+    """
+    if not value or not str(value).replace('.', '').replace('-', '').isdigit():
+        return str(value) if value is not None else ''
+
+    try:
+        # Convert bytes to megabytes (divide by 1,048,576)
+        mb_value = float(value) / 1048576
+        # Format to 3 decimal places, but remove trailing zeros
+        return f"{mb_value:.3f}".rstrip('0').rstrip('.')
+    except (ValueError, ZeroDivisionError):
+        return str(value) if value is not None else ''
+
+
+def parse_label_for_dimensions_and_measure(label: str) -> Tuple[str, Dict[str, str]]:
+    """
+    Parse timeseries label to extract measure and dimensions
+
+    Args:
+        label: Label like "sum(traffic) for application_name='App', user_name='User'"
+
+    Returns:
+        Tuple of (measure, dimensions_dict)
+    """
+    measure = ""
+    dimensions = {}
+
+    if ' for ' in label:
+        measure_part, dim_part = label.split(' for ', 1)
+        # Extract measure (e.g., "sum(traffic)")
+        if '(' in measure_part and ')' in measure_part:
+            measure = measure_part.split('(')[1].split(')')[0]
+
+        # Parse dimensions using regex for better handling of quoted values
+        # Matches: key='value' or key="value" or key=value
+        dim_pattern = r'(\w+)=[\'"]*([^,\'"]+)[\'"]*'
+        matches = re.findall(dim_pattern, dim_part)
+        for key, value in matches:
+            dimensions[key.strip()] = value.strip()
+    else:
+        # Fallback: use the whole label as measure
+        measure = label
+
+    return measure, dimensions
+
+
+def is_bytes_measure(measure: str, units: str = "") -> bool:
+    """
+    Determine if a measure represents bytes data that should be converted to MB
+
+    Args:
+        measure: The measure name
+        units: The units field if available
+
+    Returns:
+        True if this measure should be converted to MB
+    """
+    bytes_measures = {
+        'downstream', 'upstream', 'traffic', 'bytes', 'bytesDownstream',
+        'bytesUpstream', 'bytesTotal', 'throughput_downstream', 'throughput_upstream'
+    }
+
+    # Check if measure name indicates bytes
+    if measure.lower() in bytes_measures:
+        return True
+
+    # Check if measure contains bytes-related keywords
+    if any(keyword in measure.lower() for keyword in ['bytes', 'throughput']):
+        return True
+
+    # Check units field
+    if units and 'bytes' in units.lower():
+        return True
+
+    return False
+
+
+def build_wide_timeseries_header(dimension_names: List[str], measures: List[str],
+                                 sorted_timestamps: List[int], bytes_measures: Set[str]) -> List[str]:
+    """
+    Build header for wide-format timeseries CSV
+
+    Args:
+        dimension_names: List of dimension column names
+        measures: List of measure names
+        sorted_timestamps: List of timestamps in order
+        bytes_measures: Set of measures that should have _mb suffix
+
+    Returns:
+        Complete header row as list of strings
+    """
+    header = dimension_names.copy()
+
+    # Add timestamp and measure columns for each time period
+    for i, timestamp in enumerate(sorted_timestamps, 1):
+        header.append(f'timestamp_period_{i}')
+        for measure in measures:
+            if measure in bytes_measures:
+                header.append(f'{measure}_period_{i}_mb')
+            else:
+                header.append(f'{measure}_period_{i}')
+
+    return header
+
+
+
+
+
+
+
+
+
+
+
+
+
+def format_to_csv(response_data: Dict[str, Any], operation_name: str) -> str:
+    """
+    Main function to format response data to CSV based on operation type
+
+    Args:
+        response_data: JSON response data
+        operation_name: Name of the operation (e.g., 'query.appStats')
+
+    Returns:
+        CSV formatted string
+    """
+    if operation_name == 'query.appStats':
+        # Dynamic import to avoid circular imports
+        try:
+            from .formatter_app_stats import format_app_stats
+        except ImportError:
+            try:
+                from catocli.Utils.formatter_app_stats import format_app_stats
+            except ImportError:
+                from formatter_app_stats import format_app_stats
+        return format_app_stats(response_data, output_format='csv')
+    elif operation_name == 'query.appStatsTimeSeries':
+        # Dynamic import to avoid circular imports
+        try:
+            from .formatter_app_stats_timeseries import format_app_stats_timeseries
+        except ImportError:
+            try:
+                from catocli.Utils.formatter_app_stats_timeseries import format_app_stats_timeseries
+            except ImportError:
+                from formatter_app_stats_timeseries import format_app_stats_timeseries
+        return format_app_stats_timeseries(response_data, output_format='csv')
+    elif operation_name == 'query.socketPortMetricsTimeSeries':
+        # Dynamic import to avoid circular imports
+        try:
+            from .formatter_socket_port_metrics_timeseries import format_socket_port_metrics_timeseries
+        except ImportError:
+            try:
+                from catocli.Utils.formatter_socket_port_metrics_timeseries import format_socket_port_metrics_timeseries
+            except ImportError:
+                from formatter_socket_port_metrics_timeseries import format_socket_port_metrics_timeseries
+        return format_socket_port_metrics_timeseries(response_data, output_format='csv')
+    elif operation_name == 'query.accountMetrics':
+        # Dynamic import to avoid circular imports
+        try:
+            from .formatter_account_metrics import format_account_metrics
+        except ImportError:
+            try:
+                from catocli.Utils.formatter_account_metrics import format_account_metrics
+            except ImportError:
+                from formatter_account_metrics import format_account_metrics
+        return format_account_metrics(response_data, output_format='csv')
+    elif operation_name == 'query.eventsTimeSeries':
+        # Dynamic import to avoid circular imports
+        try:
+            from .formatter_events_timeseries import format_events_timeseries
+        except ImportError:
+            try:
+                from catocli.Utils.formatter_events_timeseries import format_events_timeseries
+            except ImportError:
+                from formatter_events_timeseries import format_events_timeseries
+        return format_events_timeseries(response_data, output_format='csv')
+    elif operation_name == 'query.socketPortMetrics':
+        # Dynamic import to avoid circular imports
+        try:
+            from .formatter_socket_port_metrics import format_socket_port_metrics
+        except ImportError:
+            try:
+                from catocli.Utils.formatter_socket_port_metrics import format_socket_port_metrics
+            except ImportError:
+                from formatter_socket_port_metrics import format_socket_port_metrics
+        return format_socket_port_metrics(response_data, output_format='csv')
+    else:
+        # Default: try to convert any JSON response to simple CSV
+        return json.dumps(response_data, indent=2)
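To make the shared helpers concrete, here is a small usage sketch (not shipped in the package); the inputs are invented, but the behavior follows the implementations above.

from catocli.Utils.formatter_utils import (
    convert_bytes_to_mb,
    format_timestamp,
    is_bytes_measure,
    parse_label_for_dimensions_and_measure,
)

# Labels with a " for " clause yield a measure plus parsed dimensions.
measure, dims = parse_label_for_dimensions_and_measure(
    "sum(traffic) for application_name='App', user_name='User'"
)
print(measure)  # traffic
print(dims)     # {'application_name': 'App', 'user_name': 'User'}

print(is_bytes_measure("traffic"))      # True - 'traffic' is in the bytes-measure set
print(convert_bytes_to_mb(3145728))     # 3 - bytes / 1,048,576, trailing zeros trimmed
print(format_timestamp(1700000000000))  # 2023-11-14 22:13:20 UTC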
catocli/__init__.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "3.0.18"
+__version__ = "3.0.24"
 __cato_host__ = "https://api.catonetworks.com/api/v1/graphql2"
catocli/clisettings.json
CHANGED
@@ -26,10 +26,42 @@
     "ContainerQueries": true,
     "SiteQueries": true
   },
-  "
-  "query.appStats":
-
-
-
+  "queryOperationDefaultFormatOverrides": {
+    "query.appStats": {
+      "enabled": true,
+      "default_format": "json",
+      "format_function": "format_app_stats",
+      "supports_raw_flag": true
+    },
+    "query.appStatsTimeSeries": {
+      "enabled": true,
+      "default_format": "json",
+      "format_function": "format_app_stats_timeseries",
+      "supports_raw_flag": true
+    },
+    "query.socketPortMetricsTimeSeries": {
+      "enabled": true,
+      "default_format": "json",
+      "format_function": "format_socket_port_metrics_timeseries",
+      "supports_raw_flag": true
+    },
+    "query.accountMetrics": {
+      "enabled": true,
+      "default_format": "json",
+      "format_function": "format_account_metrics",
+      "supports_raw_flag": true
+    },
+    "query.eventsTimeSeries": {
+      "enabled": true,
+      "default_format": "json",
+      "format_function": "format_events_timeseries",
+      "supports_raw_flag": true
+    },
+    "query.socketPortMetrics": {
+      "enabled": true,
+      "default_format": "json",
+      "format_function": "format_socket_port_metrics",
+      "supports_raw_flag": true
+    }
+  }
   }
 }
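The dispatch side of these settings lives in catocli/parsers/customParserApiClient.py, which this diff only summarizes, so the following is an assumed sketch (not the CLI's actual code) of how a consumer might resolve the new queryOperationDefaultFormatOverrides block; resolve_format_override is a hypothetical helper.

import json

def resolve_format_override(settings_path: str, operation_name: str):
    """Return (default_format, format_function) for an operation, or None."""
    with open(settings_path) as fh:
        settings = json.load(fh)
    overrides = settings.get("queryOperationDefaultFormatOverrides", {})
    entry = overrides.get(operation_name)
    if entry and entry.get("enabled"):
        return entry.get("default_format", "json"), entry.get("format_function")
    return None

# e.g. resolve_format_override("catocli/clisettings.json", "query.accountMetrics")
# -> ("json", "format_account_metrics")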