catocli 2.1.2__py3-none-any.whl → 2.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release has been flagged as a potentially problematic version of catocli.

Files changed (98)
  1. catocli/Utils/clidriver.py +18 -18
  2. catocli/Utils/cliutils.py +165 -0
  3. catocli/Utils/csv_formatter.py +652 -0
  4. catocli/__init__.py +1 -1
  5. catocli/parsers/custom/export_rules/__init__.py +0 -4
  6. catocli/parsers/custom/export_sites/__init__.py +4 -3
  7. catocli/parsers/custom/export_sites/export_sites.py +198 -55
  8. catocli/parsers/custom/import_sites_to_tf/import_sites_to_tf.py +473 -393
  9. catocli/parsers/customParserApiClient.py +444 -38
  10. catocli/parsers/custom_private/__init__.py +19 -13
  11. catocli/parsers/mutation_accountManagement/__init__.py +21 -0
  12. catocli/parsers/mutation_accountManagement_disableAccount/README.md +15 -0
  13. catocli/parsers/mutation_admin/__init__.py +12 -0
  14. catocli/parsers/mutation_container/__init__.py +18 -0
  15. catocli/parsers/mutation_enterpriseDirectory/__init__.py +8 -0
  16. catocli/parsers/mutation_groups/__init__.py +6 -0
  17. catocli/parsers/mutation_hardware/__init__.py +2 -0
  18. catocli/parsers/mutation_policy/__init__.py +378 -0
  19. catocli/parsers/mutation_policy_antiMalwareFileHash_addRule/README.md +20 -0
  20. catocli/parsers/mutation_policy_antiMalwareFileHash_addSection/README.md +20 -0
  21. catocli/parsers/mutation_policy_antiMalwareFileHash_createPolicyRevision/README.md +20 -0
  22. catocli/parsers/mutation_policy_antiMalwareFileHash_discardPolicyRevision/README.md +20 -0
  23. catocli/parsers/mutation_policy_antiMalwareFileHash_moveRule/README.md +20 -0
  24. catocli/parsers/mutation_policy_antiMalwareFileHash_moveSection/README.md +20 -0
  25. catocli/parsers/mutation_policy_antiMalwareFileHash_publishPolicyRevision/README.md +20 -0
  26. catocli/parsers/mutation_policy_antiMalwareFileHash_removeRule/README.md +20 -0
  27. catocli/parsers/mutation_policy_antiMalwareFileHash_removeSection/README.md +20 -0
  28. catocli/parsers/mutation_policy_antiMalwareFileHash_updatePolicy/README.md +20 -0
  29. catocli/parsers/mutation_policy_antiMalwareFileHash_updateRule/README.md +20 -0
  30. catocli/parsers/mutation_policy_antiMalwareFileHash_updateSection/README.md +20 -0
  31. catocli/parsers/mutation_sandbox/__init__.py +4 -0
  32. catocli/parsers/mutation_site/__init__.py +72 -0
  33. catocli/parsers/mutation_sites/__init__.py +72 -0
  34. catocli/parsers/mutation_xdr/__init__.py +6 -0
  35. catocli/parsers/query_accountBySubdomain/__init__.py +2 -0
  36. catocli/parsers/query_accountManagement/__init__.py +2 -0
  37. catocli/parsers/query_accountMetrics/__init__.py +6 -0
  38. catocli/parsers/query_accountRoles/__init__.py +2 -0
  39. catocli/parsers/query_accountSnapshot/__init__.py +2 -0
  40. catocli/parsers/query_admin/__init__.py +2 -0
  41. catocli/parsers/query_admins/__init__.py +2 -0
  42. catocli/parsers/query_appStats/__init__.py +6 -0
  43. catocli/parsers/query_appStatsTimeSeries/README.md +3 -0
  44. catocli/parsers/query_appStatsTimeSeries/__init__.py +6 -0
  45. catocli/parsers/query_auditFeed/__init__.py +2 -0
  46. catocli/parsers/query_catalogs/__init__.py +2 -0
  47. catocli/parsers/query_container/__init__.py +2 -0
  48. catocli/parsers/query_devices/README.md +1 -1
  49. catocli/parsers/query_devices/__init__.py +2 -0
  50. catocli/parsers/query_enterpriseDirectory/__init__.py +2 -0
  51. catocli/parsers/query_entityLookup/__init__.py +2 -0
  52. catocli/parsers/query_events/__init__.py +2 -0
  53. catocli/parsers/query_eventsFeed/__init__.py +2 -0
  54. catocli/parsers/query_eventsTimeSeries/__init__.py +2 -0
  55. catocli/parsers/query_groups/__init__.py +6 -0
  56. catocli/parsers/query_hardware/README.md +1 -1
  57. catocli/parsers/query_hardware/__init__.py +2 -0
  58. catocli/parsers/query_hardwareManagement/__init__.py +2 -0
  59. catocli/parsers/query_licensing/__init__.py +2 -0
  60. catocli/parsers/query_policy/__init__.py +37 -0
  61. catocli/parsers/query_policy_antiMalwareFileHash_policy/README.md +19 -0
  62. catocli/parsers/query_popLocations/__init__.py +2 -0
  63. catocli/parsers/query_sandbox/__init__.py +2 -0
  64. catocli/parsers/query_servicePrincipalAdmin/__init__.py +2 -0
  65. catocli/parsers/query_site/__init__.py +33 -0
  66. catocli/parsers/query_siteLocation/__init__.py +2 -0
  67. catocli/parsers/query_site_siteGeneralDetails/README.md +19 -0
  68. catocli/parsers/query_socketPortMetrics/__init__.py +2 -0
  69. catocli/parsers/query_socketPortMetricsTimeSeries/__init__.py +6 -0
  70. catocli/parsers/query_subDomains/__init__.py +2 -0
  71. catocli/parsers/query_xdr/__init__.py +4 -0
  72. catocli/parsers/raw/__init__.py +3 -1
  73. {catocli-2.1.2.dist-info → catocli-2.1.4.dist-info}/METADATA +1 -1
  74. {catocli-2.1.2.dist-info → catocli-2.1.4.dist-info}/RECORD +98 -66
  75. models/mutation.accountManagement.disableAccount.json +545 -0
  76. models/mutation.policy.antiMalwareFileHash.addRule.json +2068 -0
  77. models/mutation.policy.antiMalwareFileHash.addSection.json +1350 -0
  78. models/mutation.policy.antiMalwareFileHash.createPolicyRevision.json +1822 -0
  79. models/mutation.policy.antiMalwareFileHash.discardPolicyRevision.json +1758 -0
  80. models/mutation.policy.antiMalwareFileHash.moveRule.json +1552 -0
  81. models/mutation.policy.antiMalwareFileHash.moveSection.json +1251 -0
  82. models/mutation.policy.antiMalwareFileHash.publishPolicyRevision.json +1813 -0
  83. models/mutation.policy.antiMalwareFileHash.removeRule.json +1204 -0
  84. models/mutation.policy.antiMalwareFileHash.removeSection.json +954 -0
  85. models/mutation.policy.antiMalwareFileHash.updatePolicy.json +1834 -0
  86. models/mutation.policy.antiMalwareFileHash.updateRule.json +1757 -0
  87. models/mutation.policy.antiMalwareFileHash.updateSection.json +1105 -0
  88. models/mutation.site.updateSiteGeneralDetails.json +3 -3
  89. models/mutation.sites.updateSiteGeneralDetails.json +3 -3
  90. models/query.devices.json +249 -2
  91. models/query.hardware.json +224 -0
  92. models/query.policy.antiMalwareFileHash.policy.json +1583 -0
  93. models/query.site.siteGeneralDetails.json +899 -0
  94. schema/catolib.py +52 -14
  95. {catocli-2.1.2.dist-info → catocli-2.1.4.dist-info}/WHEEL +0 -0
  96. {catocli-2.1.2.dist-info → catocli-2.1.4.dist-info}/entry_points.txt +0 -0
  97. {catocli-2.1.2.dist-info → catocli-2.1.4.dist-info}/licenses/LICENSE +0 -0
  98. {catocli-2.1.2.dist-info → catocli-2.1.4.dist-info}/top_level.txt +0 -0
catocli/Utils/csv_formatter.py ADDED
@@ -0,0 +1,652 @@
+ #!/usr/bin/env python3
+ """
+ CSV Formatter for Cato CLI
+
+ This module provides functions to convert JSON responses from Cato API
+ into CSV format, with special handling for timeseries data in wide format.
+
+ Supports multiple response patterns:
+ - Records grid (appStats): records[] with fieldsMap + fieldsUnitTypes
+ - Flat timeseries (appStatsTimeSeries, socketPortMetricsTimeSeries): timeseries[] with labels
+ - Hierarchical timeseries (accountMetrics): sites[] → interfaces[] → timeseries[]
+ """
+
+ import csv
+ import io
+ import json
+ import re
+ from datetime import datetime
+ from typing import Dict, List, Any, Optional, Set, Tuple
+
+
+ # Shared Helper Functions
+
+ def format_timestamp(timestamp_ms: int) -> str:
+     """
+     Convert timestamp from milliseconds to readable format
+
+     Args:
+         timestamp_ms: Timestamp in milliseconds
+
+     Returns:
+         Formatted timestamp string in UTC
+     """
+     try:
+         # Convert milliseconds to seconds for datetime
+         timestamp_sec = timestamp_ms / 1000
+         dt = datetime.utcfromtimestamp(timestamp_sec)
+         return dt.strftime('%Y-%m-%d %H:%M:%S UTC')
+     except (ValueError, OSError):
+         return str(timestamp_ms)
+
+
+ def convert_bytes_to_mb(value: Any) -> str:
+     """
+     Convert bytes value to megabytes with proper formatting
+
+     Args:
+         value: The value to convert (should be numeric)
+
+     Returns:
+         Formatted MB value as string
+     """
+     if not value or not str(value).replace('.', '').replace('-', '').isdigit():
+         return str(value) if value is not None else ''
+
+     try:
+         # Convert bytes to megabytes (divide by 1,048,576)
+         mb_value = float(value) / 1048576
+         # Format to 3 decimal places, but remove trailing zeros
+         return f"{mb_value:.3f}".rstrip('0').rstrip('.')
+     except (ValueError, ZeroDivisionError):
+         return str(value) if value is not None else ''
+
+
+ def parse_label_for_dimensions_and_measure(label: str) -> Tuple[str, Dict[str, str]]:
+     """
+     Parse timeseries label to extract measure and dimensions
+
+     Args:
+         label: Label like "sum(traffic) for application_name='App', user_name='User'"
+
+     Returns:
+         Tuple of (measure, dimensions_dict)
+     """
+     measure = ""
+     dimensions = {}
+
+     if ' for ' in label:
+         measure_part, dim_part = label.split(' for ', 1)
+         # Extract measure (e.g., "sum(traffic)")
+         if '(' in measure_part and ')' in measure_part:
+             measure = measure_part.split('(')[1].split(')')[0]
+
+         # Parse dimensions using regex for better handling of quoted values
+         # Matches: key='value' or key="value" or key=value
+         dim_pattern = r'(\w+)=[\'"]*([^,\'"]+)[\'"]*'
+         matches = re.findall(dim_pattern, dim_part)
+         for key, value in matches:
+             dimensions[key.strip()] = value.strip()
+     else:
+         # Fallback: use the whole label as measure
+         measure = label
+
+     return measure, dimensions
+
+
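Every timeseries formatter below leans on this label parser, so its behavior is worth a quick illustration before the diff continues (a minimal sketch; the label is the docstring's own example, and the import path matches the module's location in the file list):

    from catocli.Utils.csv_formatter import parse_label_for_dimensions_and_measure

    measure, dims = parse_label_for_dimensions_and_measure(
        "sum(traffic) for application_name='App', user_name='User'"
    )
    print(measure)  # traffic
    print(dims)     # {'application_name': 'App', 'user_name': 'User'}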
+ def is_bytes_measure(measure: str, units: str = "") -> bool:
+     """
+     Determine if a measure represents bytes data that should be converted to MB
+
+     Args:
+         measure: The measure name
+         units: The units field if available
+
+     Returns:
+         True if this measure should be converted to MB
+     """
+     bytes_measures = {
+         'downstream', 'upstream', 'traffic', 'bytes', 'bytesDownstream',
+         'bytesUpstream', 'bytesTotal', 'throughput_downstream', 'throughput_upstream'
+     }
+
+     # Check if measure name indicates bytes
+     if measure.lower() in bytes_measures:
+         return True
+
+     # Check if measure contains bytes-related keywords
+     if any(keyword in measure.lower() for keyword in ['bytes', 'throughput']):
+         return True
+
+     # Check units field
+     if units and 'bytes' in units.lower():
+         return True
+
+     return False
+
+
+ def build_wide_timeseries_header(dimension_names: List[str], measures: List[str],
+                                  sorted_timestamps: List[int], bytes_measures: Set[str]) -> List[str]:
+     """
+     Build header for wide-format timeseries CSV
+
+     Args:
+         dimension_names: List of dimension column names
+         measures: List of measure names
+         sorted_timestamps: List of timestamps in order
+         bytes_measures: Set of measures that should have _mb suffix
+
+     Returns:
+         Complete header row as list of strings
+     """
+     header = dimension_names.copy()
+
+     # Add timestamp and measure columns for each time period
+     for i, timestamp in enumerate(sorted_timestamps, 1):
+         header.append(f'timestamp_period_{i}')
+         for measure in measures:
+             if measure in bytes_measures:
+                 header.append(f'{measure}_period_{i}_mb')
+             else:
+                 header.append(f'{measure}_period_{i}')
+
+     return header
+
+
+ def format_app_stats_to_csv(response_data: Dict[str, Any]) -> str:
+     """
+     Convert appStats JSON response to CSV format
+
+     Args:
+         response_data: JSON response from appStats query
+
+     Returns:
+         CSV formatted string
+     """
+     if not response_data or not isinstance(response_data, dict):
+         return ""
+
+     # Check for API errors
+     if 'errors' in response_data:
+         return ""
+
+     if 'data' not in response_data or 'appStats' not in response_data['data']:
+         return ""
+
+     app_stats = response_data['data']['appStats']
+     if not app_stats or not isinstance(app_stats, dict):
+         return ""
+
+     records = app_stats.get('records', [])
+
+     if not records:
+         return ""
+
+     # Get all possible field names from the first record's fieldsMap
+     first_record = records[0]
+     field_names = list(first_record.get('fieldsMap', {}).keys())
+     field_unit_types = first_record.get('fieldsUnitTypes', [])
+
+     # Create CSV output
+     output = io.StringIO()
+     writer = csv.writer(output)
+
+     # Create headers with _mb suffix for bytes fields
+     headers = []
+     for i, field_name in enumerate(field_names):
+         if i < len(field_unit_types) and field_unit_types[i] == 'bytes':
+             headers.append(f'{field_name}_mb')
+         else:
+             headers.append(field_name)
+
+     # Write header
+     writer.writerow(headers)
+
+     # Write data rows
+     for record in records:
+         fields_map = record.get('fieldsMap', {})
+         record_unit_types = record.get('fieldsUnitTypes', [])
+         row = []
+
+         for i, field in enumerate(field_names):
+             value = fields_map.get(field, '')
+
+             # Convert bytes to MB if the field type is bytes
+             if (i < len(record_unit_types) and
+                     record_unit_types[i] == 'bytes' and
+                     value and str(value).replace('.', '').replace('-', '').isdigit()):
+                 try:
+                     # Convert bytes to megabytes (divide by 1,048,576)
+                     mb_value = float(value) / 1048576
+                     # Format to 3 decimal places, but remove trailing zeros
+                     formatted_value = f"{mb_value:.3f}".rstrip('0').rstrip('.')
+                     row.append(formatted_value)
+                 except (ValueError, ZeroDivisionError):
+                     row.append(value)
+             else:
+                 row.append(value)
+
+         writer.writerow(row)
+
+     return output.getvalue()
+
+
+ def format_app_stats_timeseries_to_csv(response_data: Dict[str, Any]) -> str:
+     """
+     Convert appStatsTimeSeries JSON response to wide-format CSV
+     Similar to the reference sccm_app_stats_wide_format.csv
+
+     Args:
+         response_data: JSON response from appStatsTimeSeries query
+
+     Returns:
+         CSV formatted string in wide format with timestamps as columns
+     """
+     if not response_data or 'data' not in response_data or 'appStatsTimeSeries' not in response_data['data']:
+         return ""
+
+     app_stats_ts = response_data['data']['appStatsTimeSeries']
+     timeseries = app_stats_ts.get('timeseries', [])
+
+     if not timeseries:
+         return ""
+
+     # Parse dimension information and measures from labels
+     # Labels are like: "sum(traffic) for application_name='Google Applications', user_name='PM Analyst'"
+     parsed_series = []
+     all_timestamps = set()
+
+     for series in timeseries:
+         label = series.get('label', '')
+         data_points = series.get('data', [])
+
+         # Extract measure and dimensions from label
+         # Example: "sum(traffic) for application_name='Google Applications', user_name='PM Analyst'"
+         measure = ""
+         dimensions = {}
+
+         try:
+             if ' for ' in label:
+                 measure_part, dim_part = label.split(' for ', 1)
+                 # Extract measure (e.g., "sum(traffic)")
+                 if '(' in measure_part and ')' in measure_part:
+                     measure = measure_part.split('(')[1].split(')')[0]
+
+                 # Parse dimensions using regex for better handling of quoted values
+                 # Matches: key='value' or key="value" or key=value
+                 dim_pattern = r'(\w+)=[\'"]*([^,\'"]+)[\'"]*'
+                 matches = re.findall(dim_pattern, dim_part)
+                 for key, value in matches:
+                     dimensions[key.strip()] = value.strip()
+             else:
+                 # Fallback: use the whole label as measure
+                 measure = label
+
+             # Create series entry with safe data parsing
+             data_dict = {}
+             for point in data_points:
+                 if isinstance(point, (list, tuple)) and len(point) >= 2:
+                     data_dict[int(point[0])] = point[1]
+
+             series_entry = {
+                 'measure': measure,
+                 'dimensions': dimensions,
+                 'data': data_dict
+             }
+             parsed_series.append(series_entry)
+
+             # Collect all timestamps
+             all_timestamps.update(series_entry['data'].keys())
+         except Exception as e:
+             print(f"DEBUG: Error processing series with label '{label}': {e}")
+             continue
+
+     # Sort timestamps
+     sorted_timestamps = sorted(all_timestamps)
+
+     # Get all unique dimension combinations
+     dimension_combos = {}
+     for series in parsed_series:
+         try:
+             dim_key = tuple(sorted(series['dimensions'].items()))
+             if dim_key not in dimension_combos:
+                 dimension_combos[dim_key] = {}
+             dimension_combos[dim_key][series['measure']] = series['data']
+         except Exception as e:
+             print(f"DEBUG: Error processing dimension combination for series: {e}")
+             print(f"DEBUG: Series dimensions: {series.get('dimensions', {})}")
+             continue
+
+     # Create CSV output
+     output = io.StringIO()
+     writer = csv.writer(output)
+
+     # Build header
+     dimension_names = set()
+     measures = set()
+     for series in parsed_series:
+         dimension_names.update(series['dimensions'].keys())
+         measures.add(series['measure'])
+
+     dimension_names = sorted(dimension_names)
+     measures = sorted(measures)
+
+     header = dimension_names.copy()
+     # Add timestamp and measure columns for each time period
+     for i, timestamp in enumerate(sorted_timestamps, 1):
+         formatted_ts = format_timestamp(timestamp)
+         header.append(f'timestamp_period_{i}')
+         for measure in measures:
+             # Add _mb suffix for bytes measures
+             if measure in ['downstream', 'upstream', 'traffic']:
+                 header.append(f'{measure}_period_{i}_mb')
+             else:
+                 header.append(f'{measure}_period_{i}')
+
+     writer.writerow(header)
+
+     # Write data rows
+     for dim_combo, measures_data in dimension_combos.items():
+         row = []
+
+         # Add dimension values
+         dim_dict = dict(dim_combo)
+         for dim_name in dimension_names:
+             row.append(dim_dict.get(dim_name, ''))
+
+         # Add timestamp and measure data for each period
+         for timestamp in sorted_timestamps:
+             formatted_ts = format_timestamp(timestamp)
+             row.append(formatted_ts)
+
+             for measure in measures:
+                 value = measures_data.get(measure, {}).get(timestamp, '')
+                 # Convert bytes measures to MB
+                 if measure in ['downstream', 'upstream', 'traffic'] and value and str(value).replace('.', '').replace('-', '').isdigit():
+                     try:
+                         # Convert bytes to megabytes
+                         mb_value = float(value) / 1048576
+                         formatted_value = f"{mb_value:.3f}".rstrip('0').rstrip('.')
+                         row.append(formatted_value)
+                     except (ValueError, ZeroDivisionError):
+                         row.append(value)
+                 else:
+                     row.append(value)
+
+         writer.writerow(row)
+
+     return output.getvalue()
+
+
+ def format_socket_port_metrics_timeseries_to_csv(response_data: Dict[str, Any]) -> str:
+     """
+     Convert socketPortMetricsTimeSeries JSON response to wide-format CSV
+
+     Args:
+         response_data: JSON response from socketPortMetricsTimeSeries query
+
+     Returns:
+         CSV formatted string in wide format with timestamps as columns
+     """
+     if not response_data or 'data' not in response_data or 'socketPortMetricsTimeSeries' not in response_data['data']:
+         return ""
+
+     socket_metrics_ts = response_data['data']['socketPortMetricsTimeSeries']
+     timeseries = socket_metrics_ts.get('timeseries', [])
+
+     if not timeseries:
+         return ""
+
+     # Parse measures from labels - these are simpler than appStatsTimeSeries
+     # Labels are like: "sum(throughput_downstream)" with no dimensions
+     parsed_series = []
+     all_timestamps = set()
+
+     for series in timeseries:
+         label = series.get('label', '')
+         data_points = series.get('data', [])
+         units = series.get('unitsTimeseries', '')
+         info = series.get('info', [])
+
+         # Extract measure from label - usually just "sum(measure_name)"
+         measure, dimensions = parse_label_for_dimensions_and_measure(label)
+
+         # If no dimensions found in label, create default dimensions from info if available
+         if not dimensions and info:
+             # Info array might contain contextual data like socket/port identifiers
+             for i, info_value in enumerate(info):
+                 dimensions[f'info_{i}'] = str(info_value)
+
+         # If still no dimensions, create a single default dimension
+         if not dimensions:
+             dimensions = {'metric_source': 'socket_port'}
+
+         series_entry = {
+             'measure': measure,
+             'dimensions': dimensions,
+             'units': units,
+             'data': {int(point[0]): point[1] for point in data_points if len(point) >= 2}
+         }
+         parsed_series.append(series_entry)
+
+         # Collect all timestamps
+         all_timestamps.update(series_entry['data'].keys())
+
+     # Sort timestamps
+     sorted_timestamps = sorted(all_timestamps)
+
+     # Get all unique dimension combinations
+     dimension_combos = {}
+     for series in parsed_series:
+         dim_key = tuple(sorted(series['dimensions'].items()))
+         if dim_key not in dimension_combos:
+             dimension_combos[dim_key] = {}
+         dimension_combos[dim_key][series['measure']] = {
+             'data': series['data'],
+             'units': series['units']
+         }
+
+     # Create CSV output
+     output = io.StringIO()
+     writer = csv.writer(output)
+
+     # Build header
+     dimension_names = set()
+     measures = set()
+     bytes_measures = set()
+
+     for series in parsed_series:
+         dimension_names.update(series['dimensions'].keys())
+         measures.add(series['measure'])
+
+         # Check if this measure should be converted to MB
+         if is_bytes_measure(series['measure'], series['units']):
+             bytes_measures.add(series['measure'])
+
+     dimension_names = sorted(dimension_names)
+     measures = sorted(measures)
+
+     # Build header using shared helper
+     header = build_wide_timeseries_header(dimension_names, measures, sorted_timestamps, bytes_measures)
+     writer.writerow(header)
+
+     # Write data rows
+     for dim_combo, measures_data in dimension_combos.items():
+         row = []
+
+         # Add dimension values
+         dim_dict = dict(dim_combo)
+         for dim_name in dimension_names:
+             row.append(dim_dict.get(dim_name, ''))
+
+         # Add timestamp and measure data for each period
+         for timestamp in sorted_timestamps:
+             formatted_ts = format_timestamp(timestamp)
+             row.append(formatted_ts)
+
+             for measure in measures:
+                 measure_info = measures_data.get(measure, {})
+                 value = measure_info.get('data', {}).get(timestamp, '')
+
+                 # Convert bytes measures to MB
+                 if measure in bytes_measures and value:
+                     row.append(convert_bytes_to_mb(value))
+                 else:
+                     row.append(value)
+
+         writer.writerow(row)
+
+     return output.getvalue()
+
+
+ def format_account_metrics_to_csv(response_data: Dict[str, Any]) -> str:
+     """
+     Convert accountMetrics JSON response to wide-format CSV
+
+     Args:
+         response_data: JSON response from accountMetrics query
+
+     Returns:
+         CSV formatted string in wide format with timestamps as columns
+     """
+     if not response_data or 'data' not in response_data or 'accountMetrics' not in response_data['data']:
+         return ""
+
+     account_metrics = response_data['data']['accountMetrics']
+     sites = account_metrics.get('sites', [])
+
+     if not sites:
+         return ""
+
+     # Collect all data points
+     parsed_series = []
+     all_timestamps = set()
+     bytes_measures = set()
+
+     for site in sites:
+         site_id = site.get('id', '')
+         site_name = site.get('name', '')
+         interfaces = site.get('interfaces', [])
+
+         for interface in interfaces:
+             interface_name = interface.get('name', '')
+             timeseries_data = interface.get('timeseries', [])
+
+             for series in timeseries_data:
+                 label = series.get('label', '')
+                 data_points = series.get('data', [])
+                 info = series.get('info', [])
+                 units = series.get('unitsTimeseries', '')
+
+                 # Create series entry with hierarchical dimensions
+                 dimensions = {
+                     'site_id': site_id,
+                     'site_name': site_name,
+                     'interface_name': interface_name
+                 }
+
+                 # Add info fields if available (additional context like interface ID)
+                 if info:
+                     for i, info_value in enumerate(info):
+                         if i == 0:  # Usually site ID, skip since we have it
+                             continue
+                         elif i == 1:  # Interface name, skip since we have it
+                             continue
+                         elif i == 2:  # Interface ID or similar
+                             dimensions['interface_id'] = str(info_value)
+                         else:
+                             dimensions[f'info_{i}'] = str(info_value)
+
+                 # Check if this measure should be converted to MB
+                 if is_bytes_measure(label, units):
+                     bytes_measures.add(label)
+
+                 series_entry = {
+                     'measure': label,
+                     'dimensions': dimensions,
+                     'data': {int(point[0]): point[1] for point in data_points if len(point) >= 2}
+                 }
+                 parsed_series.append(series_entry)
+
+                 # Collect all timestamps
+                 all_timestamps.update(series_entry['data'].keys())
+
+     # Sort timestamps
+     sorted_timestamps = sorted(all_timestamps)
+
+     # Group by dimension combinations (excluding measure)
+     dimension_combos = {}
+     for series in parsed_series:
+         dim_key = tuple(sorted(series['dimensions'].items()))
+         if dim_key not in dimension_combos:
+             dimension_combos[dim_key] = {}
+         dimension_combos[dim_key][series['measure']] = series['data']
+
+     # Create CSV output
+     output = io.StringIO()
+     writer = csv.writer(output)
+
+     # Build header
+     dimension_names = set()
+     measures = set()
+     for series in parsed_series:
+         dimension_names.update(series['dimensions'].keys())
+         measures.add(series['measure'])
+
+     dimension_names = sorted(dimension_names)
+     measures = sorted(measures)
+
+     # Build header using shared helper
+     header = build_wide_timeseries_header(dimension_names, measures, sorted_timestamps, bytes_measures)
+     writer.writerow(header)
+
+     # Write data rows
+     for dim_combo, measures_data in dimension_combos.items():
+         row = []
+
+         # Add dimension values
+         dim_dict = dict(dim_combo)
+         for dim_name in dimension_names:
+             row.append(dim_dict.get(dim_name, ''))
+
+         # Add timestamp and measure data for each period
+         for timestamp in sorted_timestamps:
+             formatted_ts = format_timestamp(timestamp)
+             row.append(formatted_ts)
+
+             for measure in measures:
+                 value = measures_data.get(measure, {}).get(timestamp, '')
+
+                 # Convert bytes measures to MB
+                 if measure in bytes_measures and value:
+                     row.append(convert_bytes_to_mb(value))
+                 else:
+                     row.append(value)
+
+         writer.writerow(row)
+
+     return output.getvalue()
+
+
+ def format_to_csv(response_data: Dict[str, Any], operation_name: str) -> str:
+     """
+     Main function to format response data to CSV based on operation type
+
+     Args:
+         response_data: JSON response data
+         operation_name: Name of the operation (e.g., 'query.appStats')
+
+     Returns:
+         CSV formatted string
+     """
+     if operation_name == 'query.appStats':
+         return format_app_stats_to_csv(response_data)
+     elif operation_name == 'query.appStatsTimeSeries':
+         return format_app_stats_timeseries_to_csv(response_data)
+     elif operation_name == 'query.accountMetrics':
+         return format_account_metrics_to_csv(response_data)
+     elif operation_name == 'query.socketPortMetricsTimeSeries':
+         return format_socket_port_metrics_timeseries_to_csv(response_data)
+     else:
+         # Default: try to convert any JSON response to simple CSV
+         return json.dumps(response_data, indent=2)
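To see the new module end to end, here is a minimal sketch driving the format_to_csv dispatcher with a synthetic appStats payload (the shape follows the module docstring above; the record values themselves are invented):

    from catocli.Utils.csv_formatter import format_to_csv

    # One record; 'traffic' is flagged as bytes via fieldsUnitTypes, so the
    # formatter renames the column to traffic_mb and converts the value.
    sample = {
        "data": {
            "appStats": {
                "records": [{
                    "fieldsMap": {"application": "SSH", "traffic": "2097152"},
                    "fieldsUnitTypes": ["string", "bytes"],
                }]
            }
        }
    }

    print(format_to_csv(sample, "query.appStats"))
    # application,traffic_mb
    # SSH,2

For the timeseries operations the CSV comes out wide rather than long: one row per dimension combination, with repeating column groups timestamp_period_1, <measure>_period_1 (or <measure>_period_1_mb for bytes measures), timestamp_period_2, and so on.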
catocli/__init__.py CHANGED
@@ -1,2 +1,2 @@
- __version__ = "2.1.2"
+ __version__ = "2.1.4"
  __cato_host__ = "https://api.catonetworks.com/api/v1/graphql2"
catocli/parsers/custom/export_rules/__init__.py CHANGED
@@ -19,8 +19,6 @@ def export_rules_parse(subparsers):
      )
  
      if_rules_parser.add_argument('-accountID', help='Account ID to export rules from (uses CATO_ACCOUNT_ID environment variable if not specified)', required=False)
-     if_rules_parser.add_argument('--output-file-path', help='Full path including filename and extension for output file. If not specified, uses default: config_data/all_ifw_rules_and_sections_{account_id}.json')
-     if_rules_parser.add_argument('--append-timestamp', action='store_true', help='Append timestamp to the filename after account ID (format: YYYY-MM-DD_HH-MM-SS)')
      if_rules_parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output')
  
      if_rules_parser.set_defaults(func=export_rules.export_if_rules_to_json)
@@ -33,8 +31,6 @@ def export_rules_parse(subparsers):
      )
  
      wf_rules_parser.add_argument('-accountID', help='Account ID to export rules from (uses CATO_ACCOUNT_ID environment variable if not specified)', required=False)
-     wf_rules_parser.add_argument('--output-file-path', help='Full path including filename and extension for output file. If not specified, uses default: config_data/all_wf_rules_and_sections_{account_id}.json')
-     wf_rules_parser.add_argument('--append-timestamp', action='store_true', help='Append timestamp to the filename after account ID (format: YYYY-MM-DD_HH-MM-SS)')
      wf_rules_parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output')
  
      wf_rules_parser.set_defaults(func=export_rules.export_wf_rules_to_json)
catocli/parsers/custom/export_sites/__init__.py CHANGED
@@ -11,9 +11,10 @@ def export_sites_parse(subparsers):
      )
  
      socket_sites_parser.add_argument('-accountID', help='Account ID to export data from (uses CATO_ACCOUNT_ID environment variable if not specified)', required=False)
-     socket_sites_parser.add_argument('-siteIDs', help='Comma-separated list of site IDs to export (e.g., "132606,132964,133511")', required=False)
-     socket_sites_parser.add_argument('--output-file-path', help='Full path including filename and extension for output file. If not specified, uses default: config_data/socket_site_data_{account_id}.json')
-     socket_sites_parser.add_argument('--append-timestamp', action='store_true', help='Append timestamp to the filename after account ID (format: YYYY-MM-DD_HH-MM-SS)')
+     socket_sites_parser.add_argument('--site-ids', '-siteIDs', dest='siteIDs', help='Comma-separated list of site IDs to filter and export (e.g., "1234,1235,1236"). If not specified, exports all sites.', required=False)
+     socket_sites_parser.add_argument('-clip', '--calculate-local-ip', action='store_true', help='Calculate local IP addresses from subnet ranges (first usable IP)')
+     socket_sites_parser.add_argument('--json-filename', dest='json_filename', help='Override JSON file name (default: socket_sites_{account_id}.json)')
+     socket_sites_parser.add_argument('--append-timestamp', dest='append_timestamp', action='store_true', help='Append timestamp to the JSON file name')
      socket_sites_parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output')
  
      socket_sites_parser.set_defaults(func=export_sites.export_socket_site_to_json)
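The reworked site-ID option keeps the legacy -siteIDs spelling as an alias of the new --site-ids form, pinned to a single dest so downstream code is unaffected. A minimal argparse sketch of that pattern (the prog name is hypothetical; the option strings and dest are taken from the diff above):

    import argparse

    # Both spellings write to args.siteIDs, so callers need not change.
    parser = argparse.ArgumentParser(prog='export-socket-sites-demo')
    parser.add_argument('--site-ids', '-siteIDs', dest='siteIDs',
                        help='Comma-separated list of site IDs to filter and export')
    parser.add_argument('-clip', '--calculate-local-ip', action='store_true',
                        help='Calculate local IP addresses from subnet ranges')

    print(parser.parse_args(['--site-ids', '1234,1235']).siteIDs)  # 1234,1235
    print(parser.parse_args(['-siteIDs', '1234,1235']).siteIDs)    # 1234,1235
    print(parser.parse_args(['-clip']).calculate_local_ip)         # True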