catocli 3.0.18-py3-none-any.whl → 3.0.24-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of catocli might be problematic.

Files changed (43)
  1. catocli/Utils/clidriver.py +16 -8
  2. catocli/Utils/formatter_account_metrics.py +544 -0
  3. catocli/Utils/formatter_app_stats.py +184 -0
  4. catocli/Utils/formatter_app_stats_timeseries.py +377 -0
  5. catocli/Utils/formatter_events_timeseries.py +459 -0
  6. catocli/Utils/formatter_socket_port_metrics.py +189 -0
  7. catocli/Utils/formatter_socket_port_metrics_timeseries.py +339 -0
  8. catocli/Utils/formatter_utils.py +251 -0
  9. catocli/__init__.py +1 -1
  10. catocli/clisettings.json +37 -5
  11. catocli/parsers/customParserApiClient.py +211 -66
  12. catocli/parsers/mutation_policy/__init__.py +405 -405
  13. catocli/parsers/mutation_site/__init__.py +15 -15
  14. catocli/parsers/mutation_sites/__init__.py +15 -15
  15. catocli/parsers/query_accountMetrics/README.md +90 -0
  16. catocli/parsers/query_accountMetrics/__init__.py +6 -0
  17. catocli/parsers/query_appStats/README.md +2 -2
  18. catocli/parsers/query_appStats/__init__.py +4 -2
  19. catocli/parsers/query_appStatsTimeSeries/__init__.py +4 -2
  20. catocli/parsers/query_eventsTimeSeries/README.md +280 -0
  21. catocli/parsers/query_eventsTimeSeries/__init__.py +6 -0
  22. catocli/parsers/query_policy/__init__.py +42 -42
  23. catocli/parsers/query_socketPortMetrics/README.md +44 -0
  24. catocli/parsers/query_socketPortMetrics/__init__.py +6 -0
  25. catocli/parsers/query_socketPortMetricsTimeSeries/README.md +83 -0
  26. catocli/parsers/query_socketPortMetricsTimeSeries/__init__.py +4 -2
  27. catocli/parsers/utils/export_utils.py +6 -2
  28. catocli-3.0.24.dist-info/METADATA +184 -0
  29. {catocli-3.0.18.dist-info → catocli-3.0.24.dist-info}/RECORD +37 -35
  30. {catocli-3.0.18.dist-info → catocli-3.0.24.dist-info}/top_level.txt +0 -1
  31. models/mutation.xdr.analystFeedback.json +822 -87
  32. models/query.xdr.stories.json +822 -87
  33. models/query.xdr.story.json +822 -87
  34. schema/catolib.py +89 -64
  35. catocli/Utils/csv_formatter.py +0 -663
  36. catocli-3.0.18.dist-info/METADATA +0 -124
  37. scripts/catolib.py +0 -62
  38. scripts/export_if_rules_to_json.py +0 -188
  39. scripts/export_wf_rules_to_json.py +0 -111
  40. scripts/import_wf_rules_to_tfstate.py +0 -331
  41. {catocli-3.0.18.dist-info → catocli-3.0.24.dist-info}/WHEEL +0 -0
  42. {catocli-3.0.18.dist-info → catocli-3.0.24.dist-info}/entry_points.txt +0 -0
  43. {catocli-3.0.18.dist-info → catocli-3.0.24.dist-info}/licenses/LICENSE +0 -0
catocli/Utils/csv_formatter.py (deleted)
@@ -1,663 +0,0 @@
- #!/usr/bin/env python3
- """
- CSV Formatter for Cato CLI
-
- This module provides functions to convert JSON responses from Cato API
- into CSV format, with special handling for timeseries data in long format.
-
- Supports multiple response patterns:
- - Records grid (appStats): records[] with fieldsMap + fieldsUnitTypes
- - Long-format timeseries (appStatsTimeSeries, socketPortMetricsTimeSeries): timeseries[] with labels (one row per timestamp)
- - Hierarchical timeseries (userMetrics): sites[] → interfaces[] → timeseries[] (one row per timestamp)
-
- All timeseries formatters now use long format (timestamp_period column) for better readability.
- """
-
- import csv
- import io
- import json
- import re
- from datetime import datetime
- from typing import Dict, List, Any, Optional, Set, Tuple
-
-
- # Shared Helper Functions
-
- def format_timestamp(timestamp_ms: int) -> str:
-     """
-     Convert timestamp from milliseconds to readable format
-
-     Args:
-         timestamp_ms: Timestamp in milliseconds
-
-     Returns:
-         Formatted timestamp string in UTC
-     """
-     try:
-         # Convert milliseconds to seconds for datetime
-         timestamp_sec = timestamp_ms / 1000
-         dt = datetime.utcfromtimestamp(timestamp_sec)
-         return dt.strftime('%Y-%m-%d %H:%M:%S UTC')
-     except (ValueError, OSError):
-         return str(timestamp_ms)
-
-
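For reference, a rough sketch of how this helper behaved, assuming the pre-3.0.24 module were imported as catocli.Utils.csv_formatter (the sample value is invented):

    from catocli.Utils.csv_formatter import format_timestamp

    # 1,700,000,000,000 ms after the Unix epoch is 2023-11-14 22:13:20 UTC.
    print(format_timestamp(1_700_000_000_000))  # '2023-11-14 22:13:20 UTC'
    # Unparseable inputs fall back to the raw value via str().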
- def convert_bytes_to_mb(value: Any) -> str:
-     """
-     Convert bytes value to megabytes with proper formatting
-
-     Args:
-         value: The value to convert (should be numeric)
-
-     Returns:
-         Formatted MB value as string
-     """
-     if not value or not str(value).replace('.', '').replace('-', '').isdigit():
-         return str(value) if value is not None else ''
-
-     try:
-         # Convert bytes to megabytes (divide by 1,048,576)
-         mb_value = float(value) / 1048576
-         # Format to 3 decimal places, but remove trailing zeros
-         return f"{mb_value:.3f}".rstrip('0').rstrip('.')
-     except (ValueError, ZeroDivisionError):
-         return str(value) if value is not None else ''
-
-
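The conversion divides by 1,048,576 (bytes per MiB) and trims trailing zeros; a quick sketch against the removed module (values invented):

    from catocli.Utils.csv_formatter import convert_bytes_to_mb

    print(convert_bytes_to_mb(1048576))   # '1'   ('1.000' with zeros trimmed)
    print(convert_bytes_to_mb(1572864))   # '1.5'
    print(convert_bytes_to_mb('n/a'))     # 'n/a' (non-numeric input passes through)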
- def parse_label_for_dimensions_and_measure(label: str) -> Tuple[str, Dict[str, str]]:
-     """
-     Parse timeseries label to extract measure and dimensions
-
-     Args:
-         label: Label like "sum(traffic) for application_name='App', user_name='User'"
-
-     Returns:
-         Tuple of (measure, dimensions_dict)
-     """
-     measure = ""
-     dimensions = {}
-
-     if ' for ' in label:
-         measure_part, dim_part = label.split(' for ', 1)
-         # Extract measure (e.g., "sum(traffic)")
-         if '(' in measure_part and ')' in measure_part:
-             measure = measure_part.split('(')[1].split(')')[0]
-
-         # Parse dimensions using regex for better handling of quoted values
-         # Matches: key='value' or key="value" or key=value
-         dim_pattern = r'(\w+)=[\'"]*([^,\'"]+)[\'"]*'
-         matches = re.findall(dim_pattern, dim_part)
-         for key, value in matches:
-             dimensions[key.strip()] = value.strip()
-     else:
-         # Fallback: use the whole label as measure
-         measure = label
-
-     return measure, dimensions
-
-
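Applied to the docstring's own example label, the parser returns the aggregate's inner name as the measure and a dict of dimensions:

    from catocli.Utils.csv_formatter import parse_label_for_dimensions_and_measure

    label = "sum(traffic) for application_name='App', user_name='User'"
    measure, dims = parse_label_for_dimensions_and_measure(label)
    print(measure)  # 'traffic'
    print(dims)     # {'application_name': 'App', 'user_name': 'User'}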
- def is_bytes_measure(measure: str, units: str = "") -> bool:
-     """
-     Determine if a measure represents bytes data that should be converted to MB
-
-     Args:
-         measure: The measure name
-         units: The units field if available
-
-     Returns:
-         True if this measure should be converted to MB
-     """
-     bytes_measures = {
-         'downstream', 'upstream', 'traffic', 'bytes', 'bytesDownstream',
-         'bytesUpstream', 'bytesTotal', 'throughput_downstream', 'throughput_upstream'
-     }
-
-     # Check if measure name indicates bytes
-     if measure.lower() in bytes_measures:
-         return True
-
-     # Check if measure contains bytes-related keywords
-     if any(keyword in measure.lower() for keyword in ['bytes', 'throughput']):
-         return True
-
-     # Check units field
-     if units and 'bytes' in units.lower():
-         return True
-
-     return False
-
-
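A few spot checks of the heuristic; note that camelCase names such as bytesDownstream are caught by the 'bytes' keyword fallback rather than by the lowercase set:

    from catocli.Utils.csv_formatter import is_bytes_measure

    print(is_bytes_measure('traffic'))                 # True  (in the set)
    print(is_bytes_measure('bytesDownstream'))         # True  (keyword 'bytes')
    print(is_bytes_measure('rtt'))                     # False
    print(is_bytes_measure('rtt', units='bytes/sec'))  # True  (units mention bytes)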
- def build_wide_timeseries_header(dimension_names: List[str], measures: List[str],
-                                  sorted_timestamps: List[int], bytes_measures: Set[str]) -> List[str]:
-     """
-     Build header for wide-format timeseries CSV
-
-     Args:
-         dimension_names: List of dimension column names
-         measures: List of measure names
-         sorted_timestamps: List of timestamps in order
-         bytes_measures: Set of measures that should have _mb suffix
-
-     Returns:
-         Complete header row as list of strings
-     """
-     header = dimension_names.copy()
-
-     # Add timestamp and measure columns for each time period
-     for i, timestamp in enumerate(sorted_timestamps, 1):
-         header.append(f'timestamp_period_{i}')
-         for measure in measures:
-             if measure in bytes_measures:
-                 header.append(f'{measure}_period_{i}_mb')
-             else:
-                 header.append(f'{measure}_period_{i}')
-
-     return header
-
-
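For example, one dimension, two measures and two timestamps produce a header like the following (names invented for illustration):

    from catocli.Utils.csv_formatter import build_wide_timeseries_header

    print(build_wide_timeseries_header(
        ['application_name'], ['traffic', 'flows'],
        [1700000000000, 1700000300000], {'traffic'}))
    # ['application_name',
    #  'timestamp_period_1', 'traffic_period_1_mb', 'flows_period_1',
    #  'timestamp_period_2', 'traffic_period_2_mb', 'flows_period_2']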
- def format_app_stats_to_csv(response_data: Dict[str, Any]) -> str:
-     """
-     Convert appStats JSON response to CSV format
-
-     Args:
-         response_data: JSON response from appStats query
-
-     Returns:
-         CSV formatted string
-     """
-     if not response_data or not isinstance(response_data, dict):
-         return ""
-
-     # Check for API errors
-     if 'errors' in response_data:
-         return ""
-
-     if 'data' not in response_data or 'appStats' not in response_data['data']:
-         return ""
-
-     app_stats = response_data['data']['appStats']
-     if not app_stats or not isinstance(app_stats, dict):
-         return ""
-
-     records = app_stats.get('records', [])
-
-     if not records:
-         return ""
-
-     # Get all possible field names from the first record's fieldsMap
-     first_record = records[0]
-     field_names = list(first_record.get('fieldsMap', {}).keys())
-     field_unit_types = first_record.get('fieldsUnitTypes', [])
-
-     # Create CSV output
-     output = io.StringIO()
-     writer = csv.writer(output)
-
-     # Create headers with _mb suffix for bytes fields
-     headers = []
-     for i, field_name in enumerate(field_names):
-         if i < len(field_unit_types) and field_unit_types[i] == 'bytes':
-             headers.append(f'{field_name}_mb')
-         else:
-             headers.append(field_name)
-
-     # Write header
-     writer.writerow(headers)
-
-     # Write data rows
-     for record in records:
-         fields_map = record.get('fieldsMap', {})
-         record_unit_types = record.get('fieldsUnitTypes', [])
-         row = []
-
-         for i, field in enumerate(field_names):
-             value = fields_map.get(field, '')
-
-             # Convert bytes to MB if the field type is bytes
-             if (i < len(record_unit_types) and
-                     record_unit_types[i] == 'bytes' and
-                     value and str(value).replace('.', '').replace('-', '').isdigit()):
-                 try:
-                     # Convert bytes to megabytes (divide by 1,048,576)
-                     mb_value = float(value) / 1048576
-                     # Format to 3 decimal places, but remove trailing zeros
-                     formatted_value = f"{mb_value:.3f}".rstrip('0').rstrip('.')
-                     row.append(formatted_value)
-                 except (ValueError, ZeroDivisionError):
-                     row.append(value)
-             else:
-                 row.append(value)
-
-         writer.writerow(row)
-
-     return output.getvalue()
-
-
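A sketch of the records-grid conversion with a hand-built payload; the field names are invented, and each fieldsUnitTypes position lines up with the corresponding fieldsMap key:

    from catocli.Utils.csv_formatter import format_app_stats_to_csv

    payload = {'data': {'appStats': {'records': [
        {'fieldsMap': {'application': 'SomeApp', 'traffic': '2097152'},
         'fieldsUnitTypes': ['string', 'bytes']}]}}}
    print(format_app_stats_to_csv(payload))
    # application,traffic_mb
    # SomeApp,2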
- def format_app_stats_timeseries_to_csv(response_data: Dict[str, Any]) -> str:
-     """
-     Convert appStatsTimeSeries JSON response to long-format CSV (one row per timestamp)
-
-     Args:
-         response_data: JSON response from appStatsTimeSeries query
-
-     Returns:
-         CSV formatted string in long format with one row per timestamp
-     """
-     if not response_data or 'data' not in response_data or 'appStatsTimeSeries' not in response_data['data']:
-         return ""
-
-     app_stats_ts = response_data['data']['appStatsTimeSeries']
-     timeseries = app_stats_ts.get('timeseries', [])
-
-     if not timeseries:
-         return ""
-
-     # Parse dimension information and measures from labels
-     # Labels are like: "sum(traffic) for application_name='Google Applications', user_name='PM Analyst'"
-     parsed_series = []
-     all_timestamps = set()
-
-     for series in timeseries:
-         label = series.get('label', '')
-         data_points = series.get('data', [])
-
-         # Extract measure and dimensions from label
-         # Example: "sum(traffic) for application_name='Google Applications', user_name='PM Analyst'"
-         measure = ""
-         dimensions = {}
-
-         try:
-             if ' for ' in label:
-                 measure_part, dim_part = label.split(' for ', 1)
-                 # Extract measure (e.g., "sum(traffic)")
-                 if '(' in measure_part and ')' in measure_part:
-                     measure = measure_part.split('(')[1].split(')')[0]
-
-                 # Parse dimensions using regex for better handling of quoted values
-                 # Matches: key='value' or key="value" or key=value
-                 dim_pattern = r'(\w+)=[\'"]*([^,\'"]+)[\'"]*'
-                 matches = re.findall(dim_pattern, dim_part)
-                 for key, value in matches:
-                     dimensions[key.strip()] = value.strip()
-             else:
-                 # Fallback: use the whole label as measure
-                 measure = label
-
-             # Create series entry with safe data parsing
-             data_dict = {}
-             for point in data_points:
-                 if isinstance(point, (list, tuple)) and len(point) >= 2:
-                     data_dict[int(point[0])] = point[1]
-
-             series_entry = {
-                 'measure': measure,
-                 'dimensions': dimensions,
-                 'data': data_dict
-             }
-             parsed_series.append(series_entry)
-
-             # Collect all timestamps
-             all_timestamps.update(series_entry['data'].keys())
-         except Exception as e:
-             print(f"DEBUG: Error processing series with label '{label}': {e}")
-             continue
-
-     # Sort timestamps
-     sorted_timestamps = sorted(all_timestamps)
-
-     # Collect all data in long format (one row per timestamp and dimension combination)
-     rows = []
-
-     # Get all unique dimension combinations
-     dimension_combos = {}
-     for series in parsed_series:
-         try:
-             dim_key = tuple(sorted(series['dimensions'].items()))
-             if dim_key not in dimension_combos:
-                 dimension_combos[dim_key] = {}
-             dimension_combos[dim_key][series['measure']] = series['data']
-         except Exception as e:
-             print(f"DEBUG: Error processing dimension combination for series: {e}")
-             print(f"DEBUG: Series dimensions: {series.get('dimensions', {})}")
-             continue
-
-     # Create rows for each timestamp and dimension combination
-     for dim_combo, measures_data in dimension_combos.items():
-         dim_dict = dict(dim_combo)
-
-         for timestamp in sorted_timestamps:
-             # Build row data for this timestamp
-             row_data = {
-                 'timestamp_period': format_timestamp(timestamp)
-             }
-
-             # Add dimension values
-             for key, value in dim_dict.items():
-                 row_data[key] = value
-
-             # Add measure values for this timestamp
-             for measure, data in measures_data.items():
-                 value = data.get(timestamp, '')
-
-                 # Convert bytes measures to MB and add appropriate suffix
-                 if measure in ['downstream', 'upstream', 'traffic']:
-                     if value:
-                         try:
-                             mb_value = float(value) / 1048576
-                             formatted_value = f"{mb_value:.3f}".rstrip('0').rstrip('.')
-                             row_data[f'{measure}_mb'] = formatted_value
-                         except (ValueError, ZeroDivisionError):
-                             row_data[f'{measure}_mb'] = value
-                     else:
-                         row_data[f'{measure}_mb'] = value
-                 else:
-                     row_data[measure] = value
-
-             rows.append(row_data)
-
-     if not rows:
-         return ""
-
-     # Create CSV output
-     output = io.StringIO()
-     writer = csv.writer(output)
-
-     # Build header dynamically from all available columns
-     all_columns = set()
-     for row_data in rows:
-         all_columns.update(row_data.keys())
-
-     # Sort columns with timestamp_period first, then dimensions, then measures
-     dimension_columns = []
-     measure_columns = []
-
-     for col in sorted(all_columns):
-         if col == 'timestamp_period':
-             continue  # Will be added first
-         elif col.endswith('_mb') or col in ['downstream', 'upstream', 'traffic']:
-             measure_columns.append(col)
-         else:
-             dimension_columns.append(col)
-
-     header = ['timestamp_period'] + sorted(dimension_columns) + sorted(measure_columns)
-     writer.writerow(header)
-
-     # Write data rows
-     for row_data in rows:
-         row = []
-         for col in header:
-             value = row_data.get(col, '')
-             row.append(value)
-         writer.writerow(row)
-
-     return output.getvalue()
-
-
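End to end, each labelled series becomes one long-format row per timestamp. A minimal sketch (timestamps and names invented):

    from catocli.Utils.csv_formatter import format_app_stats_timeseries_to_csv

    payload = {'data': {'appStatsTimeSeries': {'timeseries': [
        {'label': "sum(traffic) for application_name='SomeApp'",
         'data': [[1700000000000, 1048576], [1700000300000, 3145728]]}]}}}
    print(format_app_stats_timeseries_to_csv(payload))
    # timestamp_period,application_name,traffic_mb
    # 2023-11-14 22:13:20 UTC,SomeApp,1
    # 2023-11-14 22:18:20 UTC,SomeApp,3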
- def format_socket_port_metrics_timeseries_to_csv(response_data: Dict[str, Any]) -> str:
-     """
-     Convert socketPortMetricsTimeSeries JSON response to long-format CSV (one row per timestamp)
-
-     Args:
-         response_data: JSON response from socketPortMetricsTimeSeries query
-
-     Returns:
-         CSV formatted string in long format with one row per timestamp
-     """
-     if not response_data or 'data' not in response_data or 'socketPortMetricsTimeSeries' not in response_data['data']:
-         return ""
-
-     socket_metrics_ts = response_data['data']['socketPortMetricsTimeSeries']
-     timeseries = socket_metrics_ts.get('timeseries', [])
-
-     if not timeseries:
-         return ""
-
-     # Parse measures from labels - these are simpler than appStatsTimeSeries
-     # Labels are like: "sum(throughput_downstream)" with no dimensions
-     parsed_series = []
-     all_timestamps = set()
-
-     for series in timeseries:
-         label = series.get('label', '')
-         data_points = series.get('data', [])
-         units = series.get('unitsTimeseries', '')
-         info = series.get('info', [])
-
-         # Extract measure from label - usually just "sum(measure_name)"
-         measure, dimensions = parse_label_for_dimensions_and_measure(label)
-
-         # If no dimensions found in label, create default dimensions from info if available
-         if not dimensions and info:
-             # Info array might contain contextual data like socket/port identifiers
-             for i, info_value in enumerate(info):
-                 dimensions[f'info_{i}'] = str(info_value)
-
-         # If still no dimensions, create a single default dimension
-         if not dimensions:
-             dimensions = {'metric_source': 'socket_port'}
-
-         series_entry = {
-             'measure': measure,
-             'dimensions': dimensions,
-             'units': units,
-             'data': {int(point[0]): point[1] for point in data_points if len(point) >= 2}
-         }
-         parsed_series.append(series_entry)
-
-         # Collect all timestamps
-         all_timestamps.update(series_entry['data'].keys())
-
-     # Sort timestamps
-     sorted_timestamps = sorted(all_timestamps)
-
-     # Collect all data in long format (one row per timestamp and dimension combination)
-     rows = []
-
-     # Get all unique dimension combinations
-     dimension_combos = {}
-     for series in parsed_series:
-         dim_key = tuple(sorted(series['dimensions'].items()))
-         if dim_key not in dimension_combos:
-             dimension_combos[dim_key] = {}
-         dimension_combos[dim_key][series['measure']] = {
-             'data': series['data'],
-             'units': series['units']
-         }
-
-     # Create rows for each timestamp and dimension combination
-     for dim_combo, measures_data in dimension_combos.items():
-         dim_dict = dict(dim_combo)
-
-         for timestamp in sorted_timestamps:
-             # Build row data for this timestamp
-             row_data = {
-                 'timestamp_period': format_timestamp(timestamp)
-             }
-
-             # Add dimension values
-             for key, value in dim_dict.items():
-                 row_data[key] = value
-
-             # Add measure values for this timestamp
-             for measure, measure_info in measures_data.items():
-                 value = measure_info['data'].get(timestamp, '')
-                 units = measure_info['units']
-
-                 # Convert bytes measures to MB and add appropriate suffix
-                 if is_bytes_measure(measure, units):
-                     if value:
-                         converted_value = convert_bytes_to_mb(value)
-                         row_data[f'{measure}_mb'] = converted_value
-                     else:
-                         row_data[f'{measure}_mb'] = value
-                 else:
-                     row_data[measure] = value
-
-             rows.append(row_data)
-
-     if not rows:
-         return ""
-
-     # Create CSV output
-     output = io.StringIO()
-     writer = csv.writer(output)
-
-     # Build header dynamically from all available columns
-     all_columns = set()
-     for row_data in rows:
-         all_columns.update(row_data.keys())
-
-     # Sort columns with timestamp_period first, then dimensions, then measures
-     dimension_columns = []
-     measure_columns = []
-
-     for col in sorted(all_columns):
-         if col == 'timestamp_period':
-             continue  # Will be added first
-         elif col.endswith('_mb') or col in ['throughput_downstream', 'throughput_upstream']:
-             measure_columns.append(col)
-         else:
-             dimension_columns.append(col)
-
-     header = ['timestamp_period'] + sorted(dimension_columns) + sorted(measure_columns)
-     writer.writerow(header)
-
-     # Write data rows
-     for row_data in rows:
-         row = []
-         for col in header:
-             value = row_data.get(col, '')
-             row.append(value)
-         writer.writerow(row)
-
-     return output.getvalue()
-
-
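Socket labels typically carry no ' for ' clause, so the whole label string (aggregate wrapper included) survives as the measure name, and the metric_source fallback supplies the only dimension. A sketch with invented data:

    from catocli.Utils.csv_formatter import format_socket_port_metrics_timeseries_to_csv

    payload = {'data': {'socketPortMetricsTimeSeries': {'timeseries': [
        {'label': 'sum(throughput_downstream)', 'unitsTimeseries': 'bytes',
         'info': [], 'data': [[1700000000000, 524288]]}]}}}
    print(format_socket_port_metrics_timeseries_to_csv(payload))
    # timestamp_period,metric_source,sum(throughput_downstream)_mb
    # 2023-11-14 22:13:20 UTC,socket_port,0.5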
- def format_user_metrics_to_csv(response_data: Dict[str, Any]) -> str:
-     """
-     Convert userMetrics JSON response to long-format CSV (one row per timestamp)
-
-     Args:
-         response_data: JSON response from userMetrics query
-
-     Returns:
-         CSV formatted string in long format with one row per timestamp
-     """
-     if not response_data or 'data' not in response_data or 'accountMetrics' not in response_data['data']:
-         return ""
-
-     account_metrics = response_data['data']['accountMetrics']
-     users = account_metrics.get('users', [])
-
-     if not users:
-         return ""
-
-     # Collect all data in long format (one row per timestamp)
-     rows = []
-
-     for user in users:
-         user_id = user.get('id', '')
-         interfaces = user.get('interfaces', [])
-
-         for interface in interfaces:
-             interface_name = interface.get('name', '')
-             timeseries_list = interface.get('timeseries', [])
-
-             # Organize timeseries data by timestamp
-             timestamp_data = {}
-             info_fields = {}
-
-             for timeseries in timeseries_list:
-                 label = timeseries.get('label', '')
-                 units = timeseries.get('units', '')
-                 data_points = timeseries.get('data', [])
-                 info = timeseries.get('info', [])
-
-                 # Store info fields (should be consistent across timeseries)
-                 if info and len(info) >= 2:
-                     info_fields['info_user_id'] = str(info[0])
-                     info_fields['info_interface'] = str(info[1])
-
-                 # Process each data point
-                 for point in data_points:
-                     if isinstance(point, (list, tuple)) and len(point) >= 2:
-                         timestamp = int(point[0])
-                         value = point[1]
-
-                         if timestamp not in timestamp_data:
-                             timestamp_data[timestamp] = {}
-
-                         # Convert bytes measures to MB and add appropriate suffix
-                         if is_bytes_measure(label, units) and value:
-                             converted_value = convert_bytes_to_mb(value)
-                             timestamp_data[timestamp][f'{label}_mb'] = converted_value
-                         else:
-                             timestamp_data[timestamp][label] = value
-
-             # Create rows for each timestamp
-             for timestamp in sorted(timestamp_data.keys()):
-                 row_data = {
-                     'info_interface': info_fields.get('info_interface', interface_name),
-                     'info_user_id': info_fields.get('info_user_id', user_id),
-                     'interface_name': interface_name,
-                     'user_id': user_id,
-                     'timestamp_period': format_timestamp(timestamp)
-                 }
-
-                 # Add all measures for this timestamp
-                 for measure, value in timestamp_data[timestamp].items():
-                     row_data[measure] = value
-
-                 rows.append(row_data)
-
-     if not rows:
-         return ""
-
-     # Create CSV output
-     output = io.StringIO()
-     writer = csv.writer(output)
-
-     # Build header based on the expected format from the reference file
-     expected_measures = [
-         'bytesDownstream_mb', 'bytesDownstreamMax_mb', 'bytesUpstream_mb', 'bytesUpstreamMax_mb',
-         'health', 'lostDownstreamPcnt', 'lostUpstreamPcnt',
-         'packetsDiscardedDownstreamPcnt', 'packetsDiscardedUpstreamPcnt',
-         'rtt', 'tunnelAge'
-     ]
-
-     header = ['info_interface', 'info_user_id', 'interface_name', 'user_id', 'timestamp_period'] + expected_measures
-     writer.writerow(header)
-
-     # Write data rows
-     for row_data in rows:
-         row = []
-         for col in header:
-             value = row_data.get(col, '')
-             row.append(value)
-         writer.writerow(row)
-
-     return output.getvalue()
-
-
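The hierarchical walk flattens users[] → interfaces[] → timeseries[] into one row per timestamp; measures absent from a timestamp stay blank under the fixed header. A sketch with invented IDs:

    from catocli.Utils.csv_formatter import format_user_metrics_to_csv

    payload = {'data': {'accountMetrics': {'users': [
        {'id': 'u1', 'interfaces': [
            {'name': 'tunnel1', 'timeseries': [
                {'label': 'bytesDownstream', 'units': 'bytes',
                 'info': ['u1', 'tunnel1'],
                 'data': [[1700000000000, 1048576]]}]}]}]}}}
    print(format_user_metrics_to_csv(payload))
    # info_interface,info_user_id,interface_name,user_id,timestamp_period,bytesDownstream_mb,...
    # tunnel1,u1,tunnel1,u1,2023-11-14 22:13:20 UTC,1,,,,,,,,,,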
- def format_to_csv(response_data: Dict[str, Any], operation_name: str) -> str:
-     """
-     Main function to format response data to CSV based on operation type
-
-     Args:
-         response_data: JSON response data
-         operation_name: Name of the operation (e.g., 'query.appStats')
-
-     Returns:
-         CSV formatted string
-     """
-     if operation_name == 'query.appStats':
-         return format_app_stats_to_csv(response_data)
-     elif operation_name == 'query.appStatsTimeSeries':
-         return format_app_stats_timeseries_to_csv(response_data)
-     elif operation_name == 'query.socketPortMetricsTimeSeries':
-         return format_socket_port_metrics_timeseries_to_csv(response_data)
-     elif operation_name == 'query.userMetrics':
-         return format_user_metrics_to_csv(response_data)
-     else:
-         # Default: try to convert any JSON response to simple CSV
-         return json.dumps(response_data, indent=2)
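Dispatch was keyed on the dotted operation name: the four known operations returned CSV text, while everything else fell back to pretty-printed JSON despite the comment's claim. A usage sketch (the unknown operation name below is invented):

    from catocli.Utils.csv_formatter import format_to_csv

    csv_text = format_to_csv({'data': {'appStats': {'records': []}}}, 'query.appStats')
    print(repr(csv_text))  # '' (no records yields an empty string, not a header-only CSV)
    print(format_to_csv({'data': {}}, 'query.events'))  # JSON text, not CSV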