catocli 3.0.10__py3-none-any.whl → 3.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of catocli might be problematic; consult the package registry's advisory page for details.

Files changed (59)
  1. catocli/Utils/clidriver.py +4 -4
  2. catocli/Utils/graphql_utils.py +15 -5
  3. catocli/Utils/help_formatter.py +62 -17
  4. catocli/__init__.py +1 -1
  5. catocli/parsers/custom/__init__.py +1 -1
  6. catocli/parsers/custom/customLib.py +3 -1
  7. catocli/parsers/mutation_groups_createGroup/README.md +39 -1
  8. catocli/parsers/mutation_groups_deleteGroup/README.md +39 -1
  9. catocli/parsers/mutation_groups_updateGroup/README.md +39 -1
  10. catocli/parsers/query_accountMetrics/README.md +29 -2
  11. catocli/parsers/query_accountSnapshot/README.md +16 -0
  12. catocli/parsers/query_appStats/README.md +11 -1
  13. catocli/parsers/query_appStatsTimeSeries/README.md +16 -2
  14. catocli/parsers/query_auditFeed/README.md +3 -1
  15. catocli/parsers/query_catalogs/README.md +178 -0
  16. catocli/parsers/query_container/README.md +49 -0
  17. catocli/parsers/query_devices/README.md +728 -0
  18. catocli/parsers/query_enterpriseDirectory/README.md +83 -0
  19. catocli/parsers/query_events/README.md +5 -1
  20. catocli/parsers/query_eventsTimeSeries/README.md +10 -2
  21. catocli/parsers/query_groups_groupList/README.md +39 -1
  22. catocli/parsers/query_hardware/README.md +153 -0
  23. catocli/parsers/query_hardwareManagement/README.md +56 -0
  24. catocli/parsers/query_popLocations/README.md +63 -0
  25. catocli/parsers/query_sandbox/README.md +69 -0
  26. catocli/parsers/query_socketPortMetrics/README.md +5 -1
  27. catocli/parsers/query_socketPortMetricsTimeSeries/README.md +10 -2
  28. catocli/parsers/query_xdr_stories/README.md +7 -2
  29. {catocli-3.0.10.dist-info → catocli-3.0.13.dist-info}/METADATA +1 -1
  30. {catocli-3.0.10.dist-info → catocli-3.0.13.dist-info}/RECORD +59 -59
  31. models/mutation.accountManagement.disableAccount.json +2 -2
  32. models/mutation.accountManagement.removeAccount.json +2 -2
  33. models/mutation.groups.createGroup.json +810 -0
  34. models/mutation.groups.deleteGroup.json +810 -0
  35. models/mutation.groups.updateGroup.json +810 -0
  36. models/query.accountMetrics.json +333 -1
  37. models/query.accountSnapshot.json +50 -1
  38. models/query.appStats.json +38 -0
  39. models/query.appStatsTimeSeries.json +78 -1
  40. models/query.auditFeed.json +105 -0
  41. models/query.catalogs.json +2708 -1
  42. models/query.container.json +793 -1
  43. models/query.devices.json +10338 -1
  44. models/query.enterpriseDirectory.json +1315 -1
  45. models/query.events.json +38 -0
  46. models/query.eventsFeed.json +1587 -0
  47. models/query.eventsTimeSeries.json +78 -1
  48. models/query.groups.groupList.json +810 -0
  49. models/query.hardware.json +2333 -1
  50. models/query.hardwareManagement.json +1086 -1
  51. models/query.popLocations.json +1172 -1
  52. models/query.sandbox.json +825 -1
  53. models/query.socketPortMetrics.json +38 -0
  54. models/query.socketPortMetricsTimeSeries.json +78 -1
  55. schema/catolib.py +107 -37
  56. {catocli-3.0.10.dist-info → catocli-3.0.13.dist-info}/WHEEL +0 -0
  57. {catocli-3.0.10.dist-info → catocli-3.0.13.dist-info}/entry_points.txt +0 -0
  58. {catocli-3.0.10.dist-info → catocli-3.0.13.dist-info}/licenses/LICENSE +0 -0
  59. {catocli-3.0.10.dist-info → catocli-3.0.13.dist-info}/top_level.txt +0 -0
@@ -1070,6 +1070,42 @@
1070
1070
  },
1071
1071
  "varName": "accountID"
1072
1072
  },
1073
+ "from": {
1074
+ "defaultValue": null,
1075
+ "description": null,
1076
+ "id_str": "records___from",
1077
+ "name": "from",
1078
+ "path": "records.from",
1079
+ "requestStr": "$from:Int ",
1080
+ "required": false,
1081
+ "responseStr": "from:$from ",
1082
+ "type": {
1083
+ "kind": [
1084
+ "SCALAR"
1085
+ ],
1086
+ "name": "Int",
1087
+ "non_null": false
1088
+ },
1089
+ "varName": "from"
1090
+ },
1091
+ "limit": {
1092
+ "defaultValue": null,
1093
+ "description": null,
1094
+ "id_str": "records___limit",
1095
+ "name": "limit",
1096
+ "path": "records.limit",
1097
+ "requestStr": "$limit:Int ",
1098
+ "required": false,
1099
+ "responseStr": "limit:$limit ",
1100
+ "type": {
1101
+ "kind": [
1102
+ "SCALAR"
1103
+ ],
1104
+ "name": "Int",
1105
+ "non_null": false
1106
+ },
1107
+ "varName": "limit"
1108
+ },
1073
1109
  "socketPortMetricsDimension": {
1074
1110
  "defaultValue": null,
1075
1111
  "description": null,
@@ -2962,6 +2998,8 @@
2962
2998
  "non_null": false
2963
2999
  },
2964
3000
  "variablesPayload": {
3001
+ "from": 1,
3002
+ "limit": 1,
2965
3003
  "socketPortMetricsDimension": {
2966
3004
  "fieldName": "account_id"
2967
3005
  },
@@ -844,6 +844,43 @@
844
844
  },
845
845
  "varName": "accountID"
846
846
  },
847
+ "buckets": {
848
+ "defaultValue": null,
849
+ "description": null,
850
+ "id_str": "timeseries___buckets",
851
+ "name": "buckets",
852
+ "path": "timeseries.buckets",
853
+ "requestStr": "$buckets:Int! ",
854
+ "required": true,
855
+ "responseStr": "buckets:$buckets ",
856
+ "type": {
857
+ "kind": [
858
+ "NON_NULL",
859
+ "SCALAR"
860
+ ],
861
+ "name": "Int",
862
+ "non_null": false
863
+ },
864
+ "varName": "buckets"
865
+ },
866
+ "perSecond": {
867
+ "defaultValue": "true",
868
+ "description": "whether to normalize the data into per second (i.e. divide by granularity)",
869
+ "id_str": "data___perSecond",
870
+ "name": "perSecond",
871
+ "path": "data.perSecond",
872
+ "requestStr": "$perSecond:Boolean ",
873
+ "required": false,
874
+ "responseStr": "perSecond:$perSecond ",
875
+ "type": {
876
+ "kind": [
877
+ "SCALAR"
878
+ ],
879
+ "name": "Boolean",
880
+ "non_null": false
881
+ },
882
+ "varName": "perSecond"
883
+ },
847
884
  "socketPortMetricsDimension": {
848
885
  "defaultValue": null,
849
886
  "description": null,
@@ -1642,6 +1679,42 @@
1642
1679
  "non_null": false
1643
1680
  },
1644
1681
  "varName": "timeFrame"
1682
+ },
1683
+ "useDefaultSizeBucket": {
1684
+ "defaultValue": "false",
1685
+ "description": "In case we want to have the default size bucket (from properties)",
1686
+ "id_str": "data___useDefaultSizeBucket",
1687
+ "name": "useDefaultSizeBucket",
1688
+ "path": "data.useDefaultSizeBucket",
1689
+ "requestStr": "$useDefaultSizeBucket:Boolean ",
1690
+ "required": false,
1691
+ "responseStr": "useDefaultSizeBucket:$useDefaultSizeBucket ",
1692
+ "type": {
1693
+ "kind": [
1694
+ "SCALAR"
1695
+ ],
1696
+ "name": "Boolean",
1697
+ "non_null": false
1698
+ },
1699
+ "varName": "useDefaultSizeBucket"
1700
+ },
1701
+ "withMissingData": {
1702
+ "defaultValue": "false",
1703
+ "description": "If false, the data field will be set to '0' for buckets with no reported data. Otherwise it will be set to -1",
1704
+ "id_str": "data___withMissingData",
1705
+ "name": "withMissingData",
1706
+ "path": "data.withMissingData",
1707
+ "requestStr": "$withMissingData:Boolean ",
1708
+ "required": false,
1709
+ "responseStr": "withMissingData:$withMissingData ",
1710
+ "type": {
1711
+ "kind": [
1712
+ "SCALAR"
1713
+ ],
1714
+ "name": "Boolean",
1715
+ "non_null": false
1716
+ },
1717
+ "varName": "withMissingData"
1645
1718
  }
1646
1719
  },
1647
1720
  "path": "query.socketPortMetricsTimeSeries",
@@ -2241,6 +2314,8 @@
2241
2314
  "non_null": false
2242
2315
  },
2243
2316
  "variablesPayload": {
2317
+ "buckets": 1,
2318
+ "perSecond": true,
2244
2319
  "socketPortMetricsDimension": {
2245
2320
  "fieldName": "account_id"
2246
2321
  },
@@ -2257,6 +2332,8 @@
2257
2332
  "fieldName": "account_id",
2258
2333
  "trend": true
2259
2334
  },
2260
- "timeFrame": "example_value"
2335
+ "timeFrame": "example_value",
2336
+ "useDefaultSizeBucket": true,
2337
+ "withMissingData": true
2261
2338
  }
2262
2339
  }
schema/catolib.py CHANGED
@@ -15,13 +15,14 @@ import concurrent.futures
15
15
  import threading
16
16
  from functools import lru_cache
17
17
  import traceback
18
+ import re
18
19
 
19
20
  # Import shared utilities
20
21
  from catocli.Utils.graphql_utils import (
21
22
  loadJSON,
22
23
  renderCamelCase,
23
24
  generateGraphqlPayload as shared_generateGraphqlPayload,
24
- renderArgsAndFields as shared_renderArgsAndFields,
25
+ renderArgsAndFields,
25
26
  postProcessBareComplexFields
26
27
  )
27
28
 
@@ -93,6 +94,41 @@ def openFile(fileName, readMode="rt"):
93
94
  # print('[ERROR] File path "'+fileName+'" in csv not found, or script unable to read.')
94
95
  exit()
95
96
 
97
+ def extract_comments_from_example_file(file_content):
98
+ """
99
+ Extract comments from example markdown file.
100
+ Returns both markdown headers and comments inside bash code blocks, deduplicated.
101
+ """
102
+ comments = []
103
+ lines = file_content.split('\n')
104
+ in_bash_block = False
105
+ seen_comments = set()
106
+
107
+ for line in lines:
108
+ # Check for markdown headers (lines starting with #)
109
+ if line.strip().startswith('# ') and not in_bash_block:
110
+ comment = line.strip()
111
+ if comment not in seen_comments:
112
+ comments.append(comment)
113
+ seen_comments.add(comment)
114
+
115
+ # Check for bash code block start/end
116
+ if line.strip() == '```bash':
117
+ in_bash_block = True
118
+ continue
119
+ elif line.strip() == '```':
120
+ in_bash_block = False
121
+ continue
122
+
123
+ # Extract comments inside bash blocks
124
+ if in_bash_block and line.strip().startswith('# '):
125
+ comment = line.strip()
126
+ if comment not in seen_comments:
127
+ comments.append(comment)
128
+ seen_comments.add(comment)
129
+
130
+ return comments
131
+
96
132
  ############ parsing schema - THREADED VERSION ############
97
133
 
98
134
  def parseSchema(schema):
@@ -229,20 +265,47 @@ def processOperation(operationType, operationName):
229
265
  arg = parsedOperation["args"][argName]
230
266
  parsedOperation["operationArgs"][arg["varName"]] = arg
231
267
 
232
- # Also include child operation arguments in operationArgs (avoid duplicates)
233
- if "childOperations" in parsedOperation:
234
- for childOpName, childOp in parsedOperation["childOperations"].items():
235
- if "args" in childOp:
236
- for childArgName, childArg in childOp["args"].items():
237
- if "varName" in childArg and childArg["varName"] not in parsedOperation["operationArgs"]:
238
- parsedOperation["operationArgs"][childArg["varName"]] = childArg
268
+ # Include child operation arguments and field arguments in operationArgs for README generation
269
+ # This is needed so that README generation shows all arguments including:
270
+ # 1. Child operation arguments like storyInput (for query.xdr.stories)
271
+ # 2. Field arguments like siteIDs and userIDs (for query.accountSnapshot)
272
+ def addAllOperationArgs(data, operationArgs):
273
+ """Recursively add child operation arguments and field arguments to operationArgs"""
274
+ if isinstance(data, dict):
275
+ # Handle child operations (like in query.xdr.stories)
276
+ if "childOperations" in data:
277
+ for childName, childOp in data["childOperations"].items():
278
+ if isinstance(childOp, dict) and "args" in childOp:
279
+ for argName, arg in childOp["args"].items():
280
+ # Use the arg's varName as the key to match how main args are stored
281
+ operationArgs[arg["varName"]] = arg
282
+ # Recursively process nested child operations
283
+ addAllOperationArgs(childOp, operationArgs)
284
+
285
+ # Handle field arguments (like siteIDs in sites field, userIDs in users field)
286
+ # Add null checks to prevent AttributeError
287
+ if ("type" in data and
288
+ "definition" in data["type"] and
289
+ "fields" in data["type"]["definition"] and
290
+ data["type"]["definition"]["fields"] is not None):
291
+
292
+ for fieldName, field in data["type"]["definition"]["fields"].items():
293
+ if isinstance(field, dict) and "args" in field:
294
+ for argName, arg in field["args"].items():
295
+ # Use the arg's varName as the key to match how main args are stored
296
+ operationArgs[arg["varName"]] = arg
297
+ # Recursively process nested fields
298
+ addAllOperationArgs(field, operationArgs)
299
+
300
+ # Add child operation arguments and field arguments to operationArgs
301
+ addAllOperationArgs(parsedOperation, parsedOperation["operationArgs"])
239
302
 
240
303
  parsedOperation["variablesPayload"] = generateExampleVariables(parsedOperation)
241
304
 
242
305
  # Write files with thread-safe locking
243
306
  writeFile("../models/"+operationName+".json", json.dumps(parsedOperation, indent=4, sort_keys=True))
244
307
 
245
- payload = generateGraphqlPayload(parsedOperation["variablesPayload"], parsedOperation, operationName)
308
+ payload = shared_generateGraphqlPayload(parsedOperation["variablesPayload"], parsedOperation, operationName, renderArgsAndFields)
246
309
  writeFile("../queryPayloads/"+operationName+".json", json.dumps(payload, indent=4, sort_keys=True))
247
310
  writeFile("../queryPayloads/"+operationName+".txt", payload["query"])
248
311
 
@@ -1059,10 +1122,25 @@ catocli {operationCmd} '{example_json_pretty}'
1059
1122
  example_file_path = f"examples/{operationPath}.md"
1060
1123
  try:
1061
1124
  example_content = openFile(example_file_path)
1125
+
1126
+ # Extract comments from the example file
1127
+ comments = extract_comments_from_example_file(example_content)
1128
+
1129
+ # Add comments as a summary section if any comments were found
1130
+ comments_section = ""
1131
+ if comments:
1132
+ comments_section = "### Additional Examples\n"
1133
+ for comment in comments:
1134
+ # Remove the leading # and clean up the comment
1135
+ clean_comment = comment.lstrip('# ').strip()
1136
+ if clean_comment: # Only add non-empty comments
1137
+ comments_section += f"- {clean_comment}\n"
1138
+ comments_section += "\n"
1139
+
1062
1140
  # Add the example content with proper formatting
1063
1141
  readmeStr += f"""
1064
1142
  ## Advanced Usage
1065
- {example_content}
1143
+ {comments_section}{example_content}
1066
1144
 
1067
1145
  """
1068
1146
  except:
@@ -1314,31 +1392,8 @@ def generateExampleVariables(operation):
1314
1392
  del variablesObj["accountId"]
1315
1393
  return variablesObj
1316
1394
 
1317
- def renderArgsAndFields(responseArgStr, variablesObj, curOperation, definition, indent, dynamic_operation_args=None, operation_name=None):
1318
- """Wrapper function to use the shared renderArgsAndFields with proper signature"""
1319
- # Handle variable argument signatures for backward compatibility
1320
- if operation_name is None:
1321
- operation_name = curOperation.get('name', 'unknown_operation')
1322
-
1323
- return shared_renderArgsAndFields(
1324
- responseArgStr,
1325
- variablesObj,
1326
- curOperation,
1327
- definition,
1328
- operation_name,
1329
- indent,
1330
- dynamic_operation_args,
1331
- None # custom_client
1332
- )
1333
-
1334
- def generateGraphqlPayload(variables_obj, operation, operation_name):
1335
- """Wrapper function to use the shared generateGraphqlPayload with renderArgsAndFields"""
1336
- return shared_generateGraphqlPayload(
1337
- variables_obj,
1338
- operation,
1339
- operation_name,
1340
- renderArgsAndFields # Pass our local wrapper function
1341
- )
1395
+ # Local renderArgsAndFields wrapper removed - now using shared function directly
1396
+
1342
1397
 
1343
1398
  def parseNestedArgFields(fieldObj):
1344
1399
  """Parse nested argument fields with realistic examples"""
@@ -1551,7 +1606,7 @@ def renderSubParser(subParser, parentParserPath):
1551
1606
  {subParserPath}_parser.add_argument('--headers-file', dest='headers_file', help='Load headers from a file. Each line should contain a header in "Key: Value" format.')
1552
1607
  {subParserPath}_parser.set_defaults(func=createRequest,operation_name='{operation_path}')
1553
1608
  """
1554
- # Add -f flag for CSV-supported operations
1609
+ # Add -f flag for CSV-supported operations
1555
1610
  if supports_csv:
1556
1611
  cliDriverStr += f"""
1557
1612
  {subParserPath}_parser.add_argument('-f', '--format', choices=['json', 'csv'], default='json', help='Output format (default: json)')
@@ -1612,10 +1667,25 @@ catocli {subOperationCmd} '{example_json_pretty}'
1612
1667
  example_file_path = f"examples/{subOperationPath}.md"
1613
1668
  try:
1614
1669
  example_content = openFile(example_file_path)
1670
+
1671
+ # Extract comments from the example file
1672
+ comments = extract_comments_from_example_file(example_content)
1673
+
1674
+ # Add comments as a summary section if any comments were found
1675
+ comments_section = ""
1676
+ if comments:
1677
+ comments_section = "### Additional Examples\n"
1678
+ for comment in comments:
1679
+ # Remove the leading # and clean up the comment
1680
+ clean_comment = comment.lstrip('# ').strip()
1681
+ if clean_comment: # Only add non-empty comments
1682
+ comments_section += f"- {clean_comment}\n"
1683
+ comments_section += "\n"
1684
+
1615
1685
  # Add the example content with proper formatting
1616
1686
  readmeStr += f"""
1617
1687
  ## Advanced Usage
1618
- {example_content}
1688
+ {comments_section}{example_content}
1619
1689
 
1620
1690
  """
1621
1691
  except: