catocli 3.0.3__py3-none-any.whl → 3.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of catocli may be problematic.
- catocli/Utils/csv_formatter.py +228 -86
- catocli/__init__.py +1 -1
- catocli/clisettings.json +2 -1
- {catocli-3.0.3.dist-info → catocli-3.0.4.dist-info}/METADATA +1 -1
- {catocli-3.0.3.dist-info → catocli-3.0.4.dist-info}/RECORD +9 -9
- {catocli-3.0.3.dist-info → catocli-3.0.4.dist-info}/WHEEL +0 -0
- {catocli-3.0.3.dist-info → catocli-3.0.4.dist-info}/entry_points.txt +0 -0
- {catocli-3.0.3.dist-info → catocli-3.0.4.dist-info}/licenses/LICENSE +0 -0
- {catocli-3.0.3.dist-info → catocli-3.0.4.dist-info}/top_level.txt +0 -0
catocli/Utils/csv_formatter.py
CHANGED
@@ -3,12 +3,14 @@
 CSV Formatter for Cato CLI
 
 This module provides functions to convert JSON responses from Cato API
-into CSV format, with special handling for timeseries data in
+into CSV format, with special handling for timeseries data in long format.
 
 Supports multiple response patterns:
 - Records grid (appStats): records[] with fieldsMap + fieldsUnitTypes
--
-- Hierarchical timeseries (
+- Long-format timeseries (appStatsTimeSeries, socketPortMetricsTimeSeries): timeseries[] with labels (one row per timestamp)
+- Hierarchical timeseries (userMetrics): sites[] → interfaces[] → timeseries[] (one row per timestamp)
+
+All timeseries formatters now use long format (timestamp_period column) for better readability.
 """
 
 import csv
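For context on the docstring change: "long format" means one row per timestamp (a timestamp_period column) rather than one column per period. A hypothetical appStatsTimeSeries export in the new format would look like the sample below; the column names follow the diff, but the application name, timestamps, and values are invented for illustration.

timestamp_period,application,downstream_mb,upstream_mb
2024-01-01 00:00:00,SCCM,12.5,3.2
2024-01-01 01:00:00,SCCM,9.801,2.75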
@@ -233,14 +235,13 @@ def format_app_stats_to_csv(response_data: Dict[str, Any]) -> str:
 
 def format_app_stats_timeseries_to_csv(response_data: Dict[str, Any]) -> str:
     """
-    Convert appStatsTimeSeries JSON response to
-    Similar to the reference sccm_app_stats_wide_format.csv
+    Convert appStatsTimeSeries JSON response to long-format CSV (one row per timestamp)
 
     Args:
         response_data: JSON response from appStatsTimeSeries query
 
     Returns:
-        CSV formatted string in
+        CSV formatted string in long format with one row per timestamp
     """
     if not response_data or 'data' not in response_data or 'appStatsTimeSeries' not in response_data['data']:
         return ""
@@ -304,6 +305,9 @@ def format_app_stats_timeseries_to_csv(response_data: Dict[str, Any]) -> str:
     # Sort timestamps
     sorted_timestamps = sorted(all_timestamps)
 
+    # Collect all data in long format (one row per timestamp and dimension combination)
+    rows = []
+
     # Get all unique dimension combinations
     dimension_combos = {}
     for series in parsed_series:
@@ -317,62 +321,73 @@ def format_app_stats_timeseries_to_csv(response_data: Dict[str, Any]) -> str:
             print(f"DEBUG: Series dimensions: {series.get('dimensions', {})}")
             continue
 
+    # Create rows for each timestamp and dimension combination
+    for dim_combo, measures_data in dimension_combos.items():
+        dim_dict = dict(dim_combo)
+
+        for timestamp in sorted_timestamps:
+            # Build row data for this timestamp
+            row_data = {
+                'timestamp_period': format_timestamp(timestamp)
+            }
+
+            # Add dimension values
+            for key, value in dim_dict.items():
+                row_data[key] = value
+
+            # Add measure values for this timestamp
+            for measure, data in measures_data.items():
+                value = data.get(timestamp, '')
+
+                # Convert bytes measures to MB and add appropriate suffix
+                if measure in ['downstream', 'upstream', 'traffic']:
+                    if value:
+                        try:
+                            mb_value = float(value) / 1048576
+                            formatted_value = f"{mb_value:.3f}".rstrip('0').rstrip('.')
+                            row_data[f'{measure}_mb'] = formatted_value
+                        except (ValueError, ZeroDivisionError):
+                            row_data[f'{measure}_mb'] = value
+                    else:
+                        row_data[f'{measure}_mb'] = value
+                else:
+                    row_data[measure] = value
+
+            rows.append(row_data)
+
+    if not rows:
+        return ""
+
     # Create CSV output
     output = io.StringIO()
     writer = csv.writer(output)
 
-    # Build header
-
-
-
-        dimension_names.update(series['dimensions'].keys())
-        measures.add(series['measure'])
+    # Build header dynamically from all available columns
+    all_columns = set()
+    for row_data in rows:
+        all_columns.update(row_data.keys())
 
-
-
+    # Sort columns with timestamp_period first, then dimensions, then measures
+    dimension_columns = []
+    measure_columns = []
 
-
-
-
-
-
-
-
-        if measure in ['downstream', 'upstream', 'traffic']:
-            header.append(f'{measure}_period_{i}_mb')
-        else:
-            header.append(f'{measure}_period_{i}')
+    for col in sorted(all_columns):
+        if col == 'timestamp_period':
+            continue  # Will be added first
+        elif col.endswith('_mb') or col in ['downstream', 'upstream', 'traffic']:
+            measure_columns.append(col)
+        else:
+            dimension_columns.append(col)
 
+    header = ['timestamp_period'] + sorted(dimension_columns) + sorted(measure_columns)
     writer.writerow(header)
 
     # Write data rows
-    for
+    for row_data in rows:
         row = []
-
-
-
-        for dim_name in dimension_names:
-            row.append(dim_dict.get(dim_name, ''))
-
-        # Add timestamp and measure data for each period
-        for timestamp in sorted_timestamps:
-            formatted_ts = format_timestamp(timestamp)
-            row.append(formatted_ts)
-
-            for measure in measures:
-                value = measures_data.get(measure, {}).get(timestamp, '')
-                # Convert bytes measures to MB
-                if measure in ['downstream', 'upstream', 'traffic'] and value and str(value).replace('.', '').replace('-', '').isdigit():
-                    try:
-                        # Convert bytes to megabytes
-                        mb_value = float(value) / 1048576
-                        formatted_value = f"{mb_value:.3f}".rstrip('0').rstrip('.')
-                        row.append(formatted_value)
-                    except (ValueError, ZeroDivisionError):
-                        row.append(value)
-                else:
-                    row.append(value)
-
+        for col in header:
+            value = row_data.get(col, '')
+            row.append(value)
         writer.writerow(row)
 
     return output.getvalue()
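The byte-to-MB conversion added above rounds to three decimals and then trims trailing zeros and any dangling decimal point. A minimal standalone sketch of that logic (the helper name is ours, not from the package):

def bytes_to_mb_str(value):
    """Mirror the conversion in the diff: bytes -> MB string, 3 decimals, trimmed."""
    mb_value = float(value) / 1048576  # 1 MB = 1024 * 1024 bytes
    return f"{mb_value:.3f}".rstrip('0').rstrip('.')

assert bytes_to_mb_str(1048576) == '1'      # '1.000' -> '1.' -> '1'
assert bytes_to_mb_str(1572864) == '1.5'    # '1.500' -> '1.5'
assert bytes_to_mb_str(123456) == '0.118'   # rounded to three decimals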
@@ -380,13 +395,13 @@ def format_app_stats_timeseries_to_csv(response_data: Dict[str, Any]) -> str:
 
 def format_socket_port_metrics_timeseries_to_csv(response_data: Dict[str, Any]) -> str:
     """
-    Convert socketPortMetricsTimeSeries JSON response to
+    Convert socketPortMetricsTimeSeries JSON response to long-format CSV (one row per timestamp)
 
     Args:
         response_data: JSON response from socketPortMetricsTimeSeries query
 
     Returns:
-        CSV formatted string in
+        CSV formatted string in long format with one row per timestamp
     """
     if not response_data or 'data' not in response_data or 'socketPortMetricsTimeSeries' not in response_data['data']:
         return ""
@@ -435,6 +450,9 @@ def format_socket_port_metrics_timeseries_to_csv(response_data: Dict[str, Any])
     # Sort timestamps
    sorted_timestamps = sorted(all_timestamps)
 
+    # Collect all data in long format (one row per timestamp and dimension combination)
+    rows = []
+
     # Get all unique dimension combinations
     dimension_combos = {}
     for series in parsed_series:
@@ -446,54 +464,176 @@ def format_socket_port_metrics_timeseries_to_csv(response_data: Dict[str, Any])
                 'units': series['units']
             }
 
+    # Create rows for each timestamp and dimension combination
+    for dim_combo, measures_data in dimension_combos.items():
+        dim_dict = dict(dim_combo)
+
+        for timestamp in sorted_timestamps:
+            # Build row data for this timestamp
+            row_data = {
+                'timestamp_period': format_timestamp(timestamp)
+            }
+
+            # Add dimension values
+            for key, value in dim_dict.items():
+                row_data[key] = value
+
+            # Add measure values for this timestamp
+            for measure, measure_info in measures_data.items():
+                value = measure_info['data'].get(timestamp, '')
+                units = measure_info['units']
+
+                # Convert bytes measures to MB and add appropriate suffix
+                if is_bytes_measure(measure, units):
+                    if value:
+                        converted_value = convert_bytes_to_mb(value)
+                        row_data[f'{measure}_mb'] = converted_value
+                    else:
+                        row_data[f'{measure}_mb'] = value
+                else:
+                    row_data[measure] = value
+
+            rows.append(row_data)
+
+    if not rows:
+        return ""
+
     # Create CSV output
     output = io.StringIO()
     writer = csv.writer(output)
 
-    # Build header
-
-
-
+    # Build header dynamically from all available columns
+    all_columns = set()
+    for row_data in rows:
+        all_columns.update(row_data.keys())
 
-
-
-
-
-        # Check if this measure should be converted to MB
-        if is_bytes_measure(series['measure'], series['units']):
-            bytes_measures.add(series['measure'])
+    # Sort columns with timestamp_period first, then dimensions, then measures
+    dimension_columns = []
+    measure_columns = []
 
-
-
+    for col in sorted(all_columns):
+        if col == 'timestamp_period':
+            continue  # Will be added first
+        elif col.endswith('_mb') or col in ['throughput_downstream', 'throughput_upstream']:
+            measure_columns.append(col)
+        else:
+            dimension_columns.append(col)
 
-
-    header = build_wide_timeseries_header(dimension_names, measures, sorted_timestamps, bytes_measures)
+    header = ['timestamp_period'] + sorted(dimension_columns) + sorted(measure_columns)
     writer.writerow(header)
 
     # Write data rows
-    for
+    for row_data in rows:
         row = []
+        for col in header:
+            value = row_data.get(col, '')
+            row.append(value)
+        writer.writerow(row)
+
+    return output.getvalue()
+
+
+def format_user_metrics_to_csv(response_data: Dict[str, Any]) -> str:
+    """
+    Convert userMetrics JSON response to long-format CSV (one row per timestamp)
+
+    Args:
+        response_data: JSON response from userMetrics query
 
-
-
-
-
+    Returns:
+        CSV formatted string in long format with one row per timestamp
+    """
+    if not response_data or 'data' not in response_data or 'accountMetrics' not in response_data['data']:
+        return ""
+
+    account_metrics = response_data['data']['accountMetrics']
+    users = account_metrics.get('users', [])
+
+    if not users:
+        return ""
+
+    # Collect all data in long format (one row per timestamp)
+    rows = []
+
+    for user in users:
+        user_id = user.get('id', '')
+        interfaces = user.get('interfaces', [])
 
-
-
-
-
+        for interface in interfaces:
+            interface_name = interface.get('name', '')
+            timeseries_list = interface.get('timeseries', [])
+
+            # Organize timeseries data by timestamp
+            timestamp_data = {}
+            info_fields = {}
 
-            for
-
-
+            for timeseries in timeseries_list:
+                label = timeseries.get('label', '')
+                units = timeseries.get('units', '')
+                data_points = timeseries.get('data', [])
+                info = timeseries.get('info', [])
 
-                #
-                if
-
-
-
-
+                # Store info fields (should be consistent across timeseries)
+                if info and len(info) >= 2:
+                    info_fields['info_user_id'] = str(info[0])
+                    info_fields['info_interface'] = str(info[1])
+
+                # Process each data point
+                for point in data_points:
+                    if isinstance(point, (list, tuple)) and len(point) >= 2:
+                        timestamp = int(point[0])
+                        value = point[1]
+
+                        if timestamp not in timestamp_data:
+                            timestamp_data[timestamp] = {}
+
+                        # Convert bytes measures to MB and add appropriate suffix
+                        if is_bytes_measure(label, units) and value:
+                            converted_value = convert_bytes_to_mb(value)
+                            timestamp_data[timestamp][f'{label}_mb'] = converted_value
+                        else:
+                            timestamp_data[timestamp][label] = value
+
+            # Create rows for each timestamp
+            for timestamp in sorted(timestamp_data.keys()):
+                row_data = {
+                    'info_interface': info_fields.get('info_interface', interface_name),
+                    'info_user_id': info_fields.get('info_user_id', user_id),
+                    'interface_name': interface_name,
+                    'user_id': user_id,
+                    'timestamp_period': format_timestamp(timestamp)
+                }
+
+                # Add all measures for this timestamp
+                for measure, value in timestamp_data[timestamp].items():
+                    row_data[measure] = value
+
+                rows.append(row_data)
+
+    if not rows:
+        return ""
+
+    # Create CSV output
+    output = io.StringIO()
+    writer = csv.writer(output)
+
+    # Build header based on the expected format from the reference file
+    expected_measures = [
+        'bytesDownstream_mb', 'bytesDownstreamMax_mb', 'bytesUpstream_mb', 'bytesUpstreamMax_mb',
+        'health', 'lostDownstreamPcnt', 'lostUpstreamPcnt',
+        'packetsDiscardedDownstreamPcnt', 'packetsDiscardedUpstreamPcnt',
+        'rtt', 'tunnelAge'
+    ]
+
+    header = ['info_interface', 'info_user_id', 'interface_name', 'user_id', 'timestamp_period'] + expected_measures
+    writer.writerow(header)
+
+    # Write data rows
+    for row_data in rows:
+        row = []
+        for col in header:
+            value = row_data.get(col, '')
+            row.append(value)
         writer.writerow(row)
 
     return output.getvalue()
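The new format_user_metrics_to_csv walks a nested users → interfaces → timeseries structure. A hypothetical response shape it would accept, inferred from the .get() calls in the diff (all IDs, names, and values are invented):

example_response = {
    "data": {
        "accountMetrics": {
            "users": [
                {
                    "id": "12345",
                    "interfaces": [
                        {
                            "name": "CatoClient",
                            "timeseries": [
                                {
                                    "label": "bytesDownstream",
                                    "units": "bytes",
                                    "info": ["12345", "CatoClient"],
                                    "data": [[1704067200000, 1048576], [1704070800000, 2097152]],
                                }
                            ],
                        }
                    ],
                }
            ]
        }
    }
}
# format_user_metrics_to_csv(example_response) would emit one row per timestamp,
# with bytesDownstream converted to a bytesDownstream_mb column.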
@@ -516,6 +656,8 @@ def format_to_csv(response_data: Dict[str, Any], operation_name: str) -> str:
         return format_app_stats_timeseries_to_csv(response_data)
     elif operation_name == 'query.socketPortMetricsTimeSeries':
         return format_socket_port_metrics_timeseries_to_csv(response_data)
+    elif operation_name == 'query.userMetrics':
+        return format_user_metrics_to_csv(response_data)
     else:
         # Default: try to convert any JSON response to simple CSV
         return json.dumps(response_data, indent=2)
catocli/__init__.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "3.0.3"
+__version__ = "3.0.4"
 __cato_host__ = "https://api.catonetworks.com/api/v1/graphql2"
catocli/clisettings.json
CHANGED
@@ -29,6 +29,7 @@
     "queryOperationCsvOutput": {
         "query.appStats": "format_app_stats_to_csv",
         "query.appStatsTimeSeries": "format_app_stats_timeseries_to_csv",
-        "query.socketPortMetricsTimeSeries": "format_socket_port_metrics_timeseries_to_csv"
+        "query.socketPortMetricsTimeSeries": "format_socket_port_metrics_timeseries_to_csv",
+        "query.userMetrics": "format_user_metrics_to_csv"
     }
 }
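The new settings entry maps the GraphQL operation name to the formatter function added in csv_formatter.py. A sketch of how such a mapping could be resolved at runtime; the actual dispatch wiring lives elsewhere in catocli and is assumed here, only the file path and key names come from this diff:

import json

from catocli.Utils import csv_formatter

with open('catocli/clisettings.json') as f:
    settings = json.load(f)

response_data = {"data": {"accountMetrics": {"users": []}}}  # placeholder response
formatter_name = settings['queryOperationCsvOutput'].get('query.userMetrics')
if formatter_name:
    formatter = getattr(csv_formatter, formatter_name)
    print(repr(formatter(response_data)))  # '' -- no users in the placeholder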
{catocli-3.0.3.dist-info → catocli-3.0.4.dist-info}/RECORD
CHANGED

@@ -1,9 +1,9 @@
-catocli/__init__.py,sha256=
+catocli/__init__.py,sha256=TfziiUjrfwgFwNkcAZEs4M0Y4lBGljQ3RaQ6by2RxDQ,84
 catocli/__main__.py,sha256=6Z0ns_k_kUcz1Qtrn1u7UyUnqB-3e85jM_nppOwFsv4,217
-catocli/clisettings.json,sha256=
+catocli/clisettings.json,sha256=mXjDGxSR0-XVRk6_5mg5QZbaN4hOR2q-63yiUBWA3vU,1023
 catocli/Utils/clidriver.py,sha256=lzVs1nYAiStwQjNxioxktwaKmfOF69Mmp5-9aWwsNOY,15972
 catocli/Utils/cliutils.py,sha256=TTrAGlJjy9P07rLPGev9Qjx4w0g0KnWYBYcfNY1VIa8,6875
-catocli/Utils/csv_formatter.py,sha256=
+catocli/Utils/csv_formatter.py,sha256=eNy3HTTPZjABaNtxK9jSzgtRlL908n-x7_DI5qqCB1k,23668
 catocli/Utils/profile_manager.py,sha256=a-cIhlhOiFbAEuX5Im0JraalWufkcAZS1NQQ0T4ck8I,7763
 catocli/Utils/version_checker.py,sha256=tCtsCn7xxMIxOm6cWJSA_yPt0j4mNMK4iWSJej0yM6A,6696
 catocli/parsers/customParserApiClient.py,sha256=PstHTFj-WhVaaKNNEHKGoF3IYTEY9Ca3h3H9l40O6Ng,70902
@@ -398,7 +398,7 @@ catocli/parsers/raw/__init__.py,sha256=fiSzQzNSG3vje-eEXuOcdhuL8pyavkufocOJumjdF
 catocli/templates/Test_network_ranges.csv,sha256=_E5uE_nGI6k8LqMj_w_j2BmrBa1sL0SMOcYxtrnfjfE,2459
 catocli/templates/socket_sites.csv,sha256=S5qY7whbydinMwomoAlDghoiFO_xqUKRwNG1xvzl8BI,1212
 catocli/templates/socket_sites.json,sha256=X3NShci5-q3TpVSsaj62u4jFCvQAhxQ7knC-Lui_gOg,19535
-catocli-3.0.
+catocli-3.0.4.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
 graphql_client/__init__.py,sha256=2nxD4YsWoOnALXi5cXbmtIN_i0NL_eyDTQRTxs52mkI,315
 graphql_client/api_client.py,sha256=2Rc1Zo1xH9Jnk1AO68kLSofTShkZwSVF-WkVtczfIc4,5786
 graphql_client/api_client_types.py,sha256=dM3zl6FA5SSp6nR6KmLfTL1BKaXX9uPMCZAm4v_FiUs,11569
@@ -746,8 +746,8 @@ vendor/urllib3/util/timeout.py,sha256=4eT1FVeZZU7h7mYD1Jq2OXNe4fxekdNvhoWUkZusRp
 vendor/urllib3/util/url.py,sha256=wHORhp80RAXyTlAIkTqLFzSrkU7J34ZDxX-tN65MBZk,15213
 vendor/urllib3/util/util.py,sha256=j3lbZK1jPyiwD34T8IgJzdWEZVT-4E-0vYIJi9UjeNA,1146
 vendor/urllib3/util/wait.py,sha256=_ph8IrUR3sqPqi0OopQgJUlH4wzkGeM5CiyA7XGGtmI,4423
-catocli-3.0.
-catocli-3.0.
-catocli-3.0.
-catocli-3.0.
-catocli-3.0.
+catocli-3.0.4.dist-info/METADATA,sha256=e3DFkSjZAsHo2jFQN7E8EKw5238UAYbjlbr17dqz0hw,1286
+catocli-3.0.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+catocli-3.0.4.dist-info/entry_points.txt,sha256=p4k9Orre6aWcqVrNmBbckmCs39h-1naMxRo2AjWmWZ4,50
+catocli-3.0.4.dist-info/top_level.txt,sha256=F4qSgcjcW5wR9EFrO8Ud06F7ZQGFr04a9qALNQDyVxU,52
+catocli-3.0.4.dist-info/RECORD,,
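For reference, each RECORD line has the form path,sha256=digest,size, where the digest is an unpadded urlsafe base64 encoding of the file's SHA-256 hash (per PEP 376 and the wheel spec). A quick way to verify an entry from an unpacked wheel:

import base64
import hashlib

def record_line(path):
    """Recompute a RECORD entry for a file, per the wheel spec."""
    with open(path, 'rb') as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b'=')
    return f"{path},sha256={digest.decode()},{len(data)}"

# Run against the unpacked 3.0.4 wheel, this should reproduce, e.g.:
# catocli/__init__.py,sha256=TfziiUjrfwgFwNkcAZEs4M0Y4lBGljQ3RaQ6by2RxDQ,84
print(record_line('catocli/__init__.py'))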