regscale-cli 6.27.2.0__py3-none-any.whl → 6.27.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This release of regscale-cli has been flagged as potentially problematic.
- regscale/_version.py +1 -1
- regscale/core/app/application.py +1 -0
- regscale/core/app/internal/control_editor.py +73 -21
- regscale/core/app/internal/login.py +4 -1
- regscale/core/app/internal/model_editor.py +219 -64
- regscale/core/login.py +21 -4
- regscale/core/utils/date.py +77 -1
- regscale/integrations/commercial/aws/scanner.py +4 -1
- regscale/integrations/commercial/synqly/query_builder.py +4 -1
- regscale/integrations/control_matcher.py +78 -23
- regscale/integrations/public/csam/csam.py +572 -763
- regscale/integrations/public/csam/csam_agency_defined.py +179 -0
- regscale/integrations/public/csam/csam_common.py +154 -0
- regscale/integrations/public/csam/csam_controls.py +432 -0
- regscale/integrations/public/csam/csam_poam.py +124 -0
- regscale/integrations/public/fedramp/click.py +17 -4
- regscale/integrations/public/fedramp/fedramp_cis_crm.py +271 -62
- regscale/integrations/public/fedramp/poam/scanner.py +74 -7
- regscale/integrations/scanner_integration.py +16 -1
- regscale/models/integration_models/cisa_kev_data.json +49 -19
- regscale/models/integration_models/synqly_models/capabilities.json +1 -1
- regscale/models/integration_models/synqly_models/connectors/vulnerabilities.py +35 -2
- regscale/models/integration_models/synqly_models/ocsf_mapper.py +41 -12
- regscale/models/platform.py +3 -0
- regscale/models/regscale_models/__init__.py +5 -0
- regscale/models/regscale_models/component.py +1 -1
- regscale/models/regscale_models/control_implementation.py +55 -24
- regscale/models/regscale_models/organization.py +3 -0
- regscale/models/regscale_models/regscale_model.py +17 -5
- regscale/models/regscale_models/security_plan.py +1 -0
- regscale/regscale.py +11 -1
- {regscale_cli-6.27.2.0.dist-info → regscale_cli-6.27.3.0.dist-info}/METADATA +1 -1
- {regscale_cli-6.27.2.0.dist-info → regscale_cli-6.27.3.0.dist-info}/RECORD +40 -36
- tests/regscale/core/test_login.py +171 -4
- tests/regscale/integrations/test_control_matcher.py +24 -0
- tests/regscale/models/test_control_implementation.py +118 -3
- {regscale_cli-6.27.2.0.dist-info → regscale_cli-6.27.3.0.dist-info}/LICENSE +0 -0
- {regscale_cli-6.27.2.0.dist-info → regscale_cli-6.27.3.0.dist-info}/WHEEL +0 -0
- {regscale_cli-6.27.2.0.dist-info → regscale_cli-6.27.3.0.dist-info}/entry_points.txt +0 -0
- {regscale_cli-6.27.2.0.dist-info → regscale_cli-6.27.3.0.dist-info}/top_level.txt +0 -0
regscale/_version.py
CHANGED
regscale/core/app/application.py
CHANGED
@@ -88,6 +88,7 @@ class Application(metaclass=Singleton):
         "csamToken": DEFAULT_SECRET,
         "csamURL": "<myCSAMURLgoeshere>",
         "csamFilter": {},
+        "csamFrameworkCatalog": {},
         "dependabotId": "<myGithubUserIdGoesHere>",
         "dependabotOwner": "<myGithubRepoOwnerGoesHere>",
         "dependabotRepo": "<myGithubRepoNameGoesHere>",
regscale/core/app/internal/control_editor.py
CHANGED
@@ -32,8 +32,7 @@ from regscale.core.app.utils.app_utils import (
     get_user_names,
 )
 from regscale.models.app_models.click import regscale_id, regscale_module
-from regscale.models.regscale_models
-from regscale.models.regscale_models.control_implementation import ControlImplementation
+from regscale.models.regscale_models import Control, ControlImplementation


 ALL_IMPS = "all_implementations.xlsx"
@@ -256,6 +255,76 @@ def check_and_format_cells(column: str, col: list[Any]) -> None:
         cell.protection = Protection(locked=False)  # Unprotect the cell


+def _extract_control_owner_display(item: dict) -> str:
+    """
+    Extract and format control owner display name from item data.
+
+    :param dict item: Item data containing controlOwner information
+    :return: Formatted control owner display string
+    :rtype: str
+    """
+    if not item.get("controlOwner") or item["controlOwner"] is None:
+        return "Unassigned"
+
+    control_owner = item["controlOwner"]
+    last_name = str(control_owner.get("lastName", "")).strip() if control_owner.get("lastName") else ""
+    first_name = str(control_owner.get("firstName", "")).strip() if control_owner.get("firstName") else ""
+    user_name = str(control_owner.get("userName", "")).strip() if control_owner.get("userName") else ""
+
+    if last_name or first_name or user_name:
+        return f"{last_name}, {first_name} ({user_name})"
+    return "Unassigned"
+
+
+def _extract_control_data(item: dict) -> tuple:
+    """
+    Extract control-related data from item.
+
+    :param dict item: Item data containing control information
+    :return: Tuple of (control_id, control_title, control_description, control_weight, catalogue_id)
+    :rtype: tuple
+    """
+    if not item.get("control") or item["control"] is None:
+        return "", "", "", 0, 0
+
+    control = item["control"]
+    return (
+        control.get("controlId", ""),
+        control.get("title", ""),
+        control.get("description", ""),
+        control.get("weight", 0),
+        control.get("catalogueID", 0),
+    )
+
+
+def _build_implementation_row(item: dict) -> list:
+    """
+    Build a single implementation row from item data.
+
+    :param dict item: Item data from GraphQL response
+    :return: List representing a row of implementation data
+    :rtype: list
+    """
+    control_owner_display = _extract_control_owner_display(item)
+    control_id, control_title, control_description, control_weight, catalogue_id = _extract_control_data(item)
+
+    return [
+        item.get("id", 0),
+        item.get("controlID", 0),
+        control_owner_display,
+        control_id,
+        control_title,
+        control_description,
+        item.get("status", ""),
+        item.get("policy", ""),
+        item.get("implementation", ""),
+        item.get("responsibility", ""),
+        item.get("inheritable", False),
+        control_weight,
+        catalogue_id,
+    ]
+
+
 def _fetch_implementations(api: "Api", parent_id: int, parent_module: str) -> "pd.DataFrame":
     """
     Function to fetch implementations from RegScale.
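For illustration, a minimal sketch of how the new helpers compose a row; the sample item is hypothetical but mirrors the GraphQL fields the functions read (not part of the diff):

item = {
    "id": 12,
    "controlID": 345,
    "controlOwner": {"lastName": "Doe", "firstName": "Jane", "userName": "jdoe"},
    "control": {"controlId": "AC-2", "title": "Account Management", "description": "...", "weight": 1, "catalogueID": 7},
    "status": "Fully Implemented",
}
_build_implementation_row(item)
# -> [12, 345, 'Doe, Jane (jdoe)', 'AC-2', 'Account Management', '...', 'Fully Implemented', '', '', '', False, 1, 7]
# An item with no controlOwner or control yields 'Unassigned' and empty defaults instead of
# raising a KeyError, which is the failure mode the replaced list comprehension had.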
@@ -313,25 +382,8 @@ def _fetch_implementations(api: "Api", parent_id: int, parent_module: str) -> "pd.DataFrame":
     if existing_implementation_data["controlImplementations"]["totalCount"] <= 0:
         error_and_exit("No records exist for the given RegScale Id and RegScale Module.")

-    all_imps = [
-        [
-            item["id"],
-            item["controlID"],
-            f'{str(item["controlOwner"]["lastName"]).strip()}, {str(item["controlOwner"]["firstName"]).strip()} '
-            + f'({str(item["controlOwner"]["userName"]).strip()})',
-            item["control"]["controlId"],
-            item["control"]["title"],
-            item["control"]["description"],
-            item["status"],
-            item["policy"],
-            item["implementation"],
-            item["responsibility"],
-            item["inheritable"],
-            item["control"]["weight"],
-            item["control"]["catalogueID"],
-        ]
-        for item in existing_implementation_data.get("controlImplementations", {}).get("items", [])
-    ]
+    items = existing_implementation_data.get("controlImplementations", {}).get("items", [])
+    all_imps = [_build_implementation_row(item) for item in items]

     all_imps_df = pd.DataFrame(
         all_imps,
regscale/core/app/internal/login.py
CHANGED
@@ -31,6 +31,7 @@ def login(
     app: Optional["Application"] = None,
     token: Optional[str] = None,
     mfa_token: Optional[str] = "",
+    app_id: Optional[int] = 1,
 ) -> str:
     """
     Wrapper for Login to RegScale
@@ -41,6 +42,7 @@ def login(
     :param Optional[Application] app: Application object, defaults to None
     :param Optional[str] token: a valid JWT token to pass, defaults to None
     :param Optional[str] mfa_token: a valid MFA token to pass, defaults to ""
+    :param Optional[int] app_id: The app ID to login with
     :raises: ValueError if no domain value found in init.yaml
     :raises: TypeError if token or user id doesn't match expected data type
     :raises: SSLCertVerificationError if unable to validate SSL certificate
@@ -103,9 +105,10 @@ def login(
             password=str_password,
             domain=host,
             mfa_token=mfa_token,
+            app_id=app_id,
         )
     else:
-        regscale_auth = RegScaleAuth.authenticate(Api(), mfa_token=mfa_token)
+        regscale_auth = RegScaleAuth.authenticate(Api(), mfa_token=mfa_token, app_id=app_id)
     if config and config["domain"] is None:
         raise ValueError("No domain set in the init.yaml configuration file.")
     if config and config["domain"] == "":
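A hedged usage sketch of the extended wrapper; the app ID value is illustrative, and credentials are assumed to come from init.yaml or the REGSCALE_* environment variables:

from regscale.core.app.internal.login import login

# app_id defaults to 1, so existing callers keep the old behavior;
# pass a different ID (hypothetical value) to authenticate against another app registration.
jwt = login(mfa_token="", app_id=2)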
regscale/core/app/internal/model_editor.py
CHANGED
@@ -348,16 +348,22 @@ def upload_data(path: Path, obj_type: str) -> None:
     if os.path.isfile(os.path.join(path, all_workbook_filename)):
         if not os.path.isfile(os.path.join(path, old_workbook_filename)):
             return app.logger.error("Missing pre-change copy file, unable to determine if changes were made. Aborting!")
+
+        # Get the sheet name from the Excel file
+        workbook_path = os.path.join(path, all_workbook_filename)
+        with pd.ExcelFile(workbook_path) as xls:
+            sheet_name = xls.sheet_names[0] if xls.sheet_names else "Sheet1"
+
         df1 = pd.read_excel(os.path.join(path, old_workbook_filename), sheet_name=0, index_col="Id")

-        df2 = pd.read_excel(
+        df2 = pd.read_excel(workbook_path, sheet_name=0, index_col="Id")

         if df1.equals(df2):
             error_and_exit("No differences detected.")

-        app.logger.
+        app.logger.info("Changes detected in workbook. Processing updates...")
         # Need to strip out any net new rows before doing this comparison
-        df3 = strip_any_net_new_rows(app, df2, all_workbook_filename, obj_type, path, new_workbook_filename)
+        df3 = strip_any_net_new_rows(app, df2, all_workbook_filename, obj_type, path, new_workbook_filename, sheet_name)
         try:
             changes = compare_dataframes(df1, df3)
         except ValueError:
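The sheet-name lookup uses the stock pandas ExcelFile API; a standalone sketch with a hypothetical workbook path:

import pandas as pd

with pd.ExcelFile("all_implementations.xlsx") as xls:  # hypothetical file
    # First worksheet name, falling back to the pandas default for an empty workbook
    sheet_name = xls.sheet_names[0] if xls.sheet_names else "Sheet1"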
@@ -483,7 +489,13 @@ def upload_new_data(app: Application, path: Path, obj_type: str, workbook_filename


 def strip_any_net_new_rows(
-    app: Application,
+    app: Application,
+    df: "pd.DataFrame",
+    workbook_filename: str,
+    obj_type: str,
+    path: Path,
+    new_workbook_filename: str,
+    sheet_name: Optional[str] = None,
 ) -> "pd.DataFrame":
     """
     This method scans the loaded workbook for any new rows and strips them out to insert separately.
@@ -494,6 +506,7 @@ def strip_any_net_new_rows(
     :param str obj_type: The model type to load the records as
     :param Path path: The path where the Excel file can be found
     :param str new_workbook_filename: The file name of the Excel spreadsheet with new records.
+    :param Optional[str] sheet_name: The name of the worksheet being processed
     :return: pd.DataFrame The updated DataFrame, minus any new rows
     :rtype: pd.DataFrame
     """
@@ -502,14 +515,14 @@ def strip_any_net_new_rows(
     df_updates = []
     df_inserts = []
     indexes = []
-    columns =
+    columns = list(df.columns)
     obj = get_obj(obj_type)
     for x in df.index:
         if math.isnan(x):
             data_rec = {}
             for y in columns:
                 data_rec[y] = df.at[x, y]
-            df_inserts.append(convert_new_record_to_model(data_rec, obj_type, path, workbook_filename))
+            df_inserts.append(convert_new_record_to_model(data_rec, obj_type, path, workbook_filename, sheet_name))
         else:
             indexes.append(x)
             data_rec = []
@@ -519,7 +532,8 @@ def strip_any_net_new_rows(
     new_df = pd.DataFrame(df_updates, index=indexes, columns=columns)
     if len(df_inserts) > 0:
         if obj.is_new_excel_record_allowed():
-            post_and_save_models(app, df_inserts, path, obj_type, new_workbook_filename)
+            # Use workbook_filename (the actual file containing the data) instead of new_workbook_filename
+            post_and_save_models(app, df_inserts, path, obj_type, workbook_filename)
         else:
             app.logger.warning(
                 "New rows have been found in the Excel spreadsheet being loaded. New records for this model are not allowed."
@@ -528,18 +542,9 @@ def strip_any_net_new_rows(
     return new_df


-def
-
-
-
-    :param pd.DataFrame df:
-    :return: list of column names
-    :rtype: list
-    """
-    return [y for y in df.columns]
-
-
-def convert_new_record_to_model(data_rec: dict, obj_type: str, path: Path, workbook_filename: str) -> object:
+def convert_new_record_to_model(
+    data_rec: dict, obj_type: str, path: Path, workbook_filename: str, sheet_name: Optional[str] = None
+) -> object:
     """
     This method takes the new record found in the Excel file of existing records, and converts it
     into a model object for inserting into the database.
@@ -548,6 +553,7 @@ def convert_new_record_to_model(
     :param str obj_type: The model type to load the records as
     :param Path path: The path where the Excel file can be found
     :param str workbook_filename: The file name of the Excel spreadsheet
+    :param Optional[str] sheet_name: The name of the worksheet being processed
     :return: object
     :rtype: object
     :raises ValueError:
@@ -571,9 +577,29 @@ def convert_new_record_to_model(
     elif cur_field.data_type == "str":
         if not isinstance(new_obj[cur_field.field_name], str):
             new_obj[cur_field.field_name] = str(new_obj[cur_field.field_name])
+
+    parse_parent_data(new_obj, sheet_name)
+
     return cast_dict_as_model(new_obj, obj_type)


+def parse_parent_data(new_obj: dict, sheet_name: str) -> None:
+    """
+    Parse parentId and parentModule from worksheet name.
+
+    :param dict new_obj: The new object to parse the parent info for
+    :param str sheet_name: The worksheet name to parse
+    :rtype: None
+    """
+    # Parse parentId and parentModule from sheet name if available
+    if sheet_name:
+        parent_id, parent_module = parse_parent_info_from_sheet_name(sheet_name)
+        if parent_id is not None:
+            new_obj["parentId"] = parent_id
+        if parent_module is not None:
+            new_obj["parentModule"] = parent_module
+
+
 def generate_default_value_for_field(field_name: str, data_type: str) -> Any:
     """
     Generate a default value for a required field.
@@ -597,10 +623,47 @@ def generate_default_value_for_field(field_name: str, data_type: str) -> Any:
         return 0.0


+def parse_parent_info_from_sheet_name(sheet_name: str) -> tuple[Optional[int], Optional[str]]:
+    """
+    Parse parentId and parentModule from worksheet name.
+
+    Expected format: Issue(46_securityplans
+    Where:
+    - Issue( is the model prefix
+    - 46 is the parentId
+    - securityplans is the parentModule
+
+    :param str sheet_name: The worksheet name to parse
+    :return: Tuple of (parentId, parentModule), or (None, None) if pattern doesn't match
+    :rtype: tuple[Optional[int], Optional[str]]
+    """
+    if not sheet_name or "(" not in sheet_name or "_" not in sheet_name:
+        return None, None
+
+    try:
+        # Find the opening parenthesis
+        paren_index = sheet_name.index("(")
+        # Get the part after the parenthesis
+        after_paren = sheet_name[paren_index + 1 :]
+
+        # Split by underscore
+        if "_" in after_paren:
+            parts = after_paren.split("_", 1)  # Split on first underscore only
+            parent_id = int(parts[0])
+            parent_module = parts[1]
+            return parent_id, parent_module
+    except (ValueError, IndexError):
+        # If parsing fails, return None values
+        pass
+
+    return None, None
+
+
 # pylint: disable=E1136,R0914
 def upload_existing_data(app: Application, api: Api, path: Path, obj_type: str, workbook_filename: str) -> None:
     """
-    This method reads in the spreadsheet filled with existing records to update in RegScale
+    This method reads in the spreadsheet filled with existing records to update in RegScale
+    using the RegScaleModel save() and bulk_save() methods.

     :param Application app: The Application instance
     :param Api api: The instance api handler
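Given the documented worksheet-name convention, the parser should behave as follows (a quick hedged check, not part of the diff):

parse_parent_info_from_sheet_name("Issue(46_securityplans")   # (46, "securityplans")
parse_parent_info_from_sheet_name("Issue(abc_securityplans")  # (None, None): int("abc") raises ValueError
parse_parent_info_from_sheet_name("Sheet1")                   # (None, None): no "(" or "_" present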
@@ -630,48 +693,100 @@ def upload_existing_data(app: Application, api: Api, path: Path, obj_type: str, workbook_filename: str) -> None:
     logger.debug(changes)
     id_df = pd.DataFrame(ids, index=None, columns=["Id"])
     id_df2 = id_df.drop_duplicates()
+    logger.info(f"Found {len(id_df2)} unique {obj_type} ID(s) with changes: {id_df2['Id'].tolist()}")
+
     updated_files = os.path.join(path, workbook_filename)
     df3 = pd.read_excel(updated_files, sheet_name=0, index_col=None)
+    logger.debug(f"Read {len(df3)} total rows from Excel file")
+
     updated = df3[df3["Id"].isin(id_df2["Id"])]
+    logger.info(f"Filtered to {len(updated)} {obj_type}(s) matching changed IDs")
+
+    if len(updated) == 0:
+        logger.error(
+            f"No {obj_type}s found in Excel file matching the IDs in differences.txt. "
+            f"Expected IDs: {id_df2['Id'].tolist()}. "
+            f"This usually means the Excel file doesn't contain these records."
+        )
+        return
+
     updated = map_workbook_to_dict(updated_files, updated)
+    logger.debug(f"Converted to dictionary with {len(updated)} entries")
     config = app.config
-
-
+
+    # Load existing model instances from API
+    load_objs = load_model_for_id(api, updated, config["domain"] + obj.get_endpoint("get"), obj_type)
+
+    # Apply changes to model instances and queue for bulk update
+    modified_objects = []
     for cur_obj in load_objs:
-
-
-
-
-
-
-
-
-
+        # Apply Excel changes to the model instance
+        modified_obj = find_and_apply_changes(cur_obj, changes, updated)
+
+        # Ignore change tracking to ensure all updates are saved
+        modified_obj._ignore_has_changed = True
+        # Queue the instance for bulk update
+        modified_obj.save(bulk=True)
+        modified_objects.append(modified_obj)
+
+    # Execute bulk update using the model class
+    if modified_objects:
+        app.logger.info("Executing bulk update for %i %s(s)...", len(modified_objects), obj_type)
+        model_class = type(modified_objects[0])
+        results = model_class.bulk_save()
+
+        updated_count = len(results.get("updated", []))
+        created_count = len(results.get("created", []))
+
+        app.logger.info(
+            "Bulk operation completed: Updated %i %s(s), Created %i %s(s)",
+            updated_count,
+            obj_type,
+            created_count,
+            obj_type,
+        )


 # pylint: enable=E1136,R0914


-def find_and_apply_changes(cur_object:
+def find_and_apply_changes(cur_object: object, changes: list, updates: dict) -> object:
     """
     This method looks through the changes and applies those that should be applied to
-    the current
+    the current model instance.

-    :param
+    :param object cur_object: the current model instance being updated
     :param list changes: a list of the specific changes to apply
     :param dict updates: a dictionary of updated models to be applied to the current object(s)
-    :return:
-    :rtype:
+    :return: object the updated model instance
+    :rtype: object
     """
     for cur_change in changes:
-        if cur_change["id"] == cur_object
+        if cur_change["id"] == cur_object.id:
             field_def = get_field_def_for_column(cur_change["column"])
-            if
-
-
+            if field_def is None:
+                logger.warning(
+                    f"Column '{cur_change['column']}' not found in model fields for {type(cur_object).__name__} "
+                    f"ID {cur_object.id}. Change will be skipped."
                 )
+                continue
+            if len(field_def.lookup_field) > 0:
+                value = check_empty_nan(extract_update_for_column(field_def.field_name, cur_change["id"], updates))
+                setattr(cur_object, field_def.field_name, value)
             else:
-
+                field_name = get_field_name_for_column(cur_change["column"])
+                if not field_name:
+                    logger.warning(
+                        f"Could not find field name for column '{cur_change['column']}' in {type(cur_object).__name__} "
+                        f"ID {cur_object.id}. Change will be skipped."
+                    )
+                    continue
+                value = check_empty_nan(cur_change["value"])
+                logger.debug(
+                    f"Applying change to {type(cur_object).__name__} ID {cur_object.id}: "
+                    f"{field_name} = {value} (was: {getattr(cur_object, field_name, 'N/A')})"
+                )
+                setattr(cur_object, field_name, value)
     return cur_object
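The rewrite replaces per-record API churn with a queue-then-flush pattern; a condensed sketch built only from the calls visible in this hunk (the loop body is illustrative):

for cur_obj in load_objs:
    modified = find_and_apply_changes(cur_obj, changes, updated)
    modified._ignore_has_changed = True  # bypass change tracking so the edit is not dropped
    modified.save(bulk=True)             # queued locally; nothing is sent yet

results = type(load_objs[0]).bulk_save()  # one bulk request for the whole batch
print(len(results.get("updated", [])), "updated;", len(results.get("created", [])), "created")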
@@ -732,7 +847,8 @@ def post_and_save_models(
     load_file_name: str,
 ) -> None:
     """
-    Function to post new records to RegScale and save record ids to excel workbook
+    Function to post new records to RegScale and save record ids to excel workbook.
+    Uses the RegScaleModel .create() method for new objects.

     :param Application app: RegScale CLI Application object
     :param list new_models: List of new records to post to RegScale
@@ -745,28 +861,42 @@ def post_and_save_models(
     import pandas as pd  # Optimize import performance

     try:
+        # Create new objects using .create() method
         new_objs = []
         for cur_obj in new_models:
+            # Use .create() for new objects (id=0 or None)
+            cur_obj._ignore_has_changed = True
             new_obj = cur_obj.create()
             cur_obj.create_new_connecting_model(new_obj)
             new_objs.append(cur_obj)
-
-
-
-
-
-
-
-
-
-
-
-
-            )
-
-
-
-
+
+        # Save IDs and all other fields to Excel
+        if new_objs:
+            # Create a list of dicts with all field values from created objects
+            obj_data = []
+            for obj in new_objs:
+                obj_dict = {"id_number": obj.id}
+                # Add all fields from obj_fields to ensure we capture API-populated fields
+                for field in obj_fields:
+                    field_value = getattr(obj, field.field_name, None)
+                    if field_value is not None:
+                        obj_dict[field.field_name] = field_value
+                obj_data.append(obj_dict)
+
+            new_objs_df = pd.DataFrame(obj_data)
+            for file_name in [load_file_name]:
+                with pd.ExcelWriter(
+                    os.path.join(workbook_path, file_name),
+                    mode="a",
+                    engine="openpyxl",
+                    if_sheet_exists="overlay",
+                ) as writer:
+                    new_objs_df.to_excel(
+                        writer,
+                        sheet_name=obj_type + "_Ids",
+                        index=False,
+                    )
+            app.logger.info("%i total %s(s) were added to RegScale.", len(new_objs), obj_type)
     except Exception as e:
         app.logger.error(e)
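The ID write-back leans on pandas' append mode with the openpyxl engine (if_sheet_exists="overlay" requires pandas >= 1.4); a minimal standalone sketch with hypothetical file and sheet names:

import pandas as pd

ids_df = pd.DataFrame([{"id_number": 101}, {"id_number": 102}])
with pd.ExcelWriter("existing.xlsx", mode="a", engine="openpyxl", if_sheet_exists="overlay") as writer:
    ids_df.to_excel(writer, sheet_name="Issue_Ids", index=False)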
@@ -793,26 +923,49 @@ def map_pandas_timestamp(date_time: "pd.Timestamp") -> Optional[str]:
     return date_time or None


-def load_model_for_id(api: Api, wb_data: dict, url: str) -> list:
+def load_model_for_id(api: Api, wb_data: dict, url: str, obj_type: str) -> list:
     """
-    This method loads the current record for the updated objects.
+    This method loads the current record for the updated objects and returns model instances.

     :param Api api: the API object instance to use
     :param dict wb_data: The submitted workbook data in a dict
     :param str url: the base url to use to retrieve the model data
-    :
+    :param str obj_type: The model type to cast the data to
+    :return: list of model instances of the specified type
     :rtype: list
     """
     load_data = []
+    failed_loads = []
+
+    logger.info(f"Loading {len(wb_data)} {obj_type}(s) from API for update...")
+
     for cur_obj in wb_data:
         obj = wb_data[cur_obj]
         cur_id = int(obj["Id"])
         if cur_id > 0:
             url_to_use = url.replace("{id}", str(cur_id))
             url_to_use = check_url_for_double_slash(url_to_use)
+            logger.debug(f"Fetching {obj_type} ID {cur_id} from {url_to_use}")
             result = api.get(url_to_use)
             if result.status_code == 200:
-
+                dict_data = result.json()
+                model_instance = cast_dict_as_model(dict_data, obj_type)
+                load_data.append(model_instance)
+                logger.debug(f"Successfully loaded {obj_type} ID {cur_id}")
+            else:
+                failed_loads.append((cur_id, result.status_code))
+                logger.warning(
+                    f"Failed to load {obj_type} ID {cur_id} from API. Status code: {result.status_code}. "
+                    f"This record will not be updated."
+                )
+
+    if failed_loads:
+        logger.warning(
+            f"Failed to load {len(failed_loads)} {obj_type}(s) from API: "
+            f"{', '.join([f'ID {id} (HTTP {code})' for id, code in failed_loads])}"
+        )
+
+    logger.info(f"Successfully loaded {len(load_data)} {obj_type}(s) from API for update.")
     return load_data
@@ -945,7 +1098,9 @@ def map_workbook_to_lookups(file_path: str, workbook_data: Optional["pd.DataFrame"]
     else:
         wb_data = pd.read_excel(file_path)

-    wb_data = wb_data.dropna()
+    # Only drop rows where ALL values are NaN (completely empty rows)
+    # Don't drop rows with some NaN values - those are legitimate records with optional empty fields
+    wb_data = wb_data.dropna(how="all")
     for cur_row in obj_fields:
         if len(cur_row.lookup_field) > 0 and cur_row.lookup_field != "module":
             if cur_row.column_name in wb_data.columns:
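The how="all" distinction is the crux of this fix; a quick pandas illustration:

import pandas as pd

df = pd.DataFrame({"Id": [1, 2, None], "Title": ["a", None, None]})
df.dropna()           # keeps only row 0: any NaN disqualifies a row
df.dropna(how="all")  # keeps rows 0 and 1: only the fully empty row 2 is dropped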
regscale/core/login.py
CHANGED
@@ -6,6 +6,7 @@ from os import getenv
 from typing import Optional, Tuple
 from urllib.parse import urljoin

+from requests.exceptions import HTTPError
 from regscale.core.app.api import Api
 from regscale.core.app.utils.app_utils import error_and_exit
@@ -14,10 +15,11 @@ logger = logging.getLogger("regscale")

 def get_regscale_token(
     api: Api,
-    username: str = getenv("REGSCALE_USER"),
-    password: str = getenv("REGSCALE_PASSWORD"),
-    domain: str = getenv("REGSCALE_DOMAIN"),
+    username: Optional[str] = getenv("REGSCALE_USER"),
+    password: Optional[str] = getenv("REGSCALE_PASSWORD"),
+    domain: Optional[str] = getenv("REGSCALE_DOMAIN"),
     mfa_token: Optional[str] = "",
+    app_id: Optional[int] = 1,
 ) -> Tuple[str, str]:
     """
     Authenticate with RegScale and return a token
@@ -27,6 +29,7 @@ def get_regscale_token(
     :param str password: a string defaulting to the envar REGSCALE_PASSWORD
     :param str domain: a string representing the RegScale domain, checks environment REGSCALE_DOMAIN
     :param Optional[str] mfa_token: MFA token to login with
+    :param Optional[int] app_id: The app ID to login with
     :raises EnvironmentError: if domain is not passed or retrieved
     :return: a tuple of user_id and auth_token
     :rtype: Tuple[str, str]
@@ -47,7 +50,19 @@ def get_regscale_token(
     logger.info("Logging into: %s", domain)
     # suggest structuring the login paths so that they all exist in one place
     url = urljoin(domain, "/api/authentication/login")
-    response = api.post(url=url, json=auth, headers={})
+    try:
+        # Try to authenticate with the new API version
+        auth["appId"] = app_id
+        response = api.post(url=url, json=auth, headers={"X-Api-Version": "2.0"})
+        if response is None:
+            raise HTTPError("No response received from api.post(). Possible connection issue or internal error.")
+        response.raise_for_status()
+        app_id_compatible = True
+    except HTTPError:
+        # Fallback to the old API version
+        del auth["appId"]
+        response = api.post(url=url, json=auth, headers={})
+        app_id_compatible = False
     error_msg = "Unable to authenticate with RegScale. Please check your credentials."
     if response is None:
         logger.error("No response received from api.post(). Possible connection issue or internal error.")
@@ -63,4 +78,6 @@ def get_regscale_token(
         error_and_exit(f"{error_msg}\n{response.status_code}: {response.text}")
     if isinstance(response_dict, str):
         response_dict = json.loads(response_dict)
+    if app_id_compatible:
+        return response_dict["accessToken"]["id"], response_dict["accessToken"]["authToken"]
     return response_dict["id"], response_dict["auth_token"]
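A hedged usage sketch showing the two response shapes the fallback reconciles (REGSCALE_* environment variables assumed to be set):

from regscale.core.app.api import Api
from regscale.core.login import get_regscale_token

user_id, auth_token = get_regscale_token(Api(), app_id=1)
# Servers that accept X-Api-Version 2.0 answer {"accessToken": {"id": ..., "authToken": ...}};
# older servers answer {"id": ..., "auth_token": ...}. Both unpack to (user_id, auth_token).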