regscale-cli 6.16.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of regscale-cli might be problematic. Click here for more details.
- regscale/__init__.py +1 -0
- regscale/airflow/__init__.py +9 -0
- regscale/airflow/azure/__init__.py +9 -0
- regscale/airflow/azure/cli.py +89 -0
- regscale/airflow/azure/upload_dags.py +116 -0
- regscale/airflow/click_dags.py +127 -0
- regscale/airflow/click_mixins.py +82 -0
- regscale/airflow/config.py +25 -0
- regscale/airflow/factories/__init__.py +0 -0
- regscale/airflow/factories/connections.py +58 -0
- regscale/airflow/factories/workflows.py +78 -0
- regscale/airflow/hierarchy.py +88 -0
- regscale/airflow/operators/__init__.py +0 -0
- regscale/airflow/operators/click.py +36 -0
- regscale/airflow/sensors/__init__.py +0 -0
- regscale/airflow/sensors/sql.py +107 -0
- regscale/airflow/sessions/__init__.py +0 -0
- regscale/airflow/sessions/sql/__init__.py +3 -0
- regscale/airflow/sessions/sql/queries.py +64 -0
- regscale/airflow/sessions/sql/sql_server_queries.py +248 -0
- regscale/airflow/tasks/__init__.py +0 -0
- regscale/airflow/tasks/branches.py +22 -0
- regscale/airflow/tasks/cli.py +116 -0
- regscale/airflow/tasks/click.py +73 -0
- regscale/airflow/tasks/debugging.py +9 -0
- regscale/airflow/tasks/groups.py +116 -0
- regscale/airflow/tasks/init.py +60 -0
- regscale/airflow/tasks/states.py +47 -0
- regscale/airflow/tasks/workflows.py +36 -0
- regscale/ansible/__init__.py +9 -0
- regscale/core/__init__.py +0 -0
- regscale/core/app/__init__.py +3 -0
- regscale/core/app/api.py +571 -0
- regscale/core/app/application.py +665 -0
- regscale/core/app/internal/__init__.py +136 -0
- regscale/core/app/internal/admin_actions.py +230 -0
- regscale/core/app/internal/assessments_editor.py +873 -0
- regscale/core/app/internal/catalog.py +316 -0
- regscale/core/app/internal/comparison.py +459 -0
- regscale/core/app/internal/control_editor.py +571 -0
- regscale/core/app/internal/encrypt.py +79 -0
- regscale/core/app/internal/evidence.py +1240 -0
- regscale/core/app/internal/file_uploads.py +151 -0
- regscale/core/app/internal/healthcheck.py +66 -0
- regscale/core/app/internal/login.py +305 -0
- regscale/core/app/internal/migrations.py +240 -0
- regscale/core/app/internal/model_editor.py +1701 -0
- regscale/core/app/internal/poam_editor.py +632 -0
- regscale/core/app/internal/workflow.py +105 -0
- regscale/core/app/logz.py +74 -0
- regscale/core/app/utils/XMLIR.py +258 -0
- regscale/core/app/utils/__init__.py +0 -0
- regscale/core/app/utils/api_handler.py +358 -0
- regscale/core/app/utils/app_utils.py +1110 -0
- regscale/core/app/utils/catalog_utils/__init__.py +0 -0
- regscale/core/app/utils/catalog_utils/common.py +91 -0
- regscale/core/app/utils/catalog_utils/compare_catalog.py +193 -0
- regscale/core/app/utils/catalog_utils/diagnostic_catalog.py +97 -0
- regscale/core/app/utils/catalog_utils/download_catalog.py +103 -0
- regscale/core/app/utils/catalog_utils/update_catalog.py +718 -0
- regscale/core/app/utils/catalog_utils/update_catalog_v2.py +1378 -0
- regscale/core/app/utils/catalog_utils/update_catalog_v3.py +1272 -0
- regscale/core/app/utils/catalog_utils/update_plans.py +334 -0
- regscale/core/app/utils/file_utils.py +238 -0
- regscale/core/app/utils/parser_utils.py +81 -0
- regscale/core/app/utils/pickle_file_handler.py +57 -0
- regscale/core/app/utils/regscale_utils.py +319 -0
- regscale/core/app/utils/report_utils.py +119 -0
- regscale/core/app/utils/variables.py +226 -0
- regscale/core/decorators.py +31 -0
- regscale/core/lazy_group.py +65 -0
- regscale/core/login.py +63 -0
- regscale/core/server/__init__.py +0 -0
- regscale/core/server/flask_api.py +473 -0
- regscale/core/server/helpers.py +373 -0
- regscale/core/server/rest.py +64 -0
- regscale/core/server/static/css/bootstrap.css +6030 -0
- regscale/core/server/static/css/bootstrap.min.css +6 -0
- regscale/core/server/static/css/main.css +176 -0
- regscale/core/server/static/images/regscale-cli.svg +49 -0
- regscale/core/server/static/images/regscale.svg +38 -0
- regscale/core/server/templates/base.html +74 -0
- regscale/core/server/templates/index.html +43 -0
- regscale/core/server/templates/login.html +28 -0
- regscale/core/server/templates/make_base64.html +22 -0
- regscale/core/server/templates/upload_STIG.html +109 -0
- regscale/core/server/templates/upload_STIG_result.html +26 -0
- regscale/core/server/templates/upload_ssp.html +144 -0
- regscale/core/server/templates/upload_ssp_result.html +128 -0
- regscale/core/static/__init__.py +0 -0
- regscale/core/static/regex.py +14 -0
- regscale/core/utils/__init__.py +117 -0
- regscale/core/utils/click_utils.py +13 -0
- regscale/core/utils/date.py +238 -0
- regscale/core/utils/graphql.py +254 -0
- regscale/core/utils/urls.py +23 -0
- regscale/dev/__init__.py +6 -0
- regscale/dev/analysis.py +454 -0
- regscale/dev/cli.py +235 -0
- regscale/dev/code_gen.py +492 -0
- regscale/dev/dirs.py +69 -0
- regscale/dev/docs.py +384 -0
- regscale/dev/monitoring.py +26 -0
- regscale/dev/profiling.py +216 -0
- regscale/exceptions/__init__.py +4 -0
- regscale/exceptions/license_exception.py +7 -0
- regscale/exceptions/validation_exception.py +9 -0
- regscale/integrations/__init__.py +1 -0
- regscale/integrations/commercial/__init__.py +486 -0
- regscale/integrations/commercial/ad.py +433 -0
- regscale/integrations/commercial/amazon/__init__.py +0 -0
- regscale/integrations/commercial/amazon/common.py +106 -0
- regscale/integrations/commercial/aqua/__init__.py +0 -0
- regscale/integrations/commercial/aqua/aqua.py +91 -0
- regscale/integrations/commercial/aws/__init__.py +6 -0
- regscale/integrations/commercial/aws/cli.py +322 -0
- regscale/integrations/commercial/aws/inventory/__init__.py +110 -0
- regscale/integrations/commercial/aws/inventory/base.py +64 -0
- regscale/integrations/commercial/aws/inventory/resources/__init__.py +19 -0
- regscale/integrations/commercial/aws/inventory/resources/compute.py +234 -0
- regscale/integrations/commercial/aws/inventory/resources/containers.py +113 -0
- regscale/integrations/commercial/aws/inventory/resources/database.py +101 -0
- regscale/integrations/commercial/aws/inventory/resources/integration.py +237 -0
- regscale/integrations/commercial/aws/inventory/resources/networking.py +253 -0
- regscale/integrations/commercial/aws/inventory/resources/security.py +240 -0
- regscale/integrations/commercial/aws/inventory/resources/storage.py +91 -0
- regscale/integrations/commercial/aws/scanner.py +823 -0
- regscale/integrations/commercial/azure/__init__.py +0 -0
- regscale/integrations/commercial/azure/common.py +32 -0
- regscale/integrations/commercial/azure/intune.py +488 -0
- regscale/integrations/commercial/azure/scanner.py +49 -0
- regscale/integrations/commercial/burp.py +78 -0
- regscale/integrations/commercial/cpe.py +144 -0
- regscale/integrations/commercial/crowdstrike.py +1117 -0
- regscale/integrations/commercial/defender.py +1511 -0
- regscale/integrations/commercial/dependabot.py +210 -0
- regscale/integrations/commercial/durosuite/__init__.py +0 -0
- regscale/integrations/commercial/durosuite/api.py +1546 -0
- regscale/integrations/commercial/durosuite/process_devices.py +101 -0
- regscale/integrations/commercial/durosuite/scanner.py +637 -0
- regscale/integrations/commercial/durosuite/variables.py +21 -0
- regscale/integrations/commercial/ecr.py +90 -0
- regscale/integrations/commercial/gcp/__init__.py +237 -0
- regscale/integrations/commercial/gcp/auth.py +96 -0
- regscale/integrations/commercial/gcp/control_tests.py +238 -0
- regscale/integrations/commercial/gcp/variables.py +18 -0
- regscale/integrations/commercial/gitlab.py +332 -0
- regscale/integrations/commercial/grype.py +165 -0
- regscale/integrations/commercial/ibm.py +90 -0
- regscale/integrations/commercial/import_all/__init__.py +0 -0
- regscale/integrations/commercial/import_all/import_all_cmd.py +467 -0
- regscale/integrations/commercial/import_all/scan_file_fingerprints.json +27 -0
- regscale/integrations/commercial/jira.py +1046 -0
- regscale/integrations/commercial/mappings/__init__.py +0 -0
- regscale/integrations/commercial/mappings/csf_controls.json +713 -0
- regscale/integrations/commercial/mappings/nist_800_53_r5_controls.json +1516 -0
- regscale/integrations/commercial/nessus/__init__.py +0 -0
- regscale/integrations/commercial/nessus/nessus_utils.py +429 -0
- regscale/integrations/commercial/nessus/scanner.py +416 -0
- regscale/integrations/commercial/nexpose.py +90 -0
- regscale/integrations/commercial/okta.py +798 -0
- regscale/integrations/commercial/opentext/__init__.py +0 -0
- regscale/integrations/commercial/opentext/click.py +99 -0
- regscale/integrations/commercial/opentext/scanner.py +143 -0
- regscale/integrations/commercial/prisma.py +91 -0
- regscale/integrations/commercial/qualys.py +1462 -0
- regscale/integrations/commercial/salesforce.py +980 -0
- regscale/integrations/commercial/sap/__init__.py +0 -0
- regscale/integrations/commercial/sap/click.py +31 -0
- regscale/integrations/commercial/sap/sysdig/__init__.py +0 -0
- regscale/integrations/commercial/sap/sysdig/click.py +57 -0
- regscale/integrations/commercial/sap/sysdig/sysdig_scanner.py +190 -0
- regscale/integrations/commercial/sap/tenable/__init__.py +0 -0
- regscale/integrations/commercial/sap/tenable/click.py +49 -0
- regscale/integrations/commercial/sap/tenable/scanner.py +196 -0
- regscale/integrations/commercial/servicenow.py +1756 -0
- regscale/integrations/commercial/sicura/__init__.py +0 -0
- regscale/integrations/commercial/sicura/api.py +855 -0
- regscale/integrations/commercial/sicura/commands.py +73 -0
- regscale/integrations/commercial/sicura/scanner.py +481 -0
- regscale/integrations/commercial/sicura/variables.py +16 -0
- regscale/integrations/commercial/snyk.py +90 -0
- regscale/integrations/commercial/sonarcloud.py +260 -0
- regscale/integrations/commercial/sqlserver.py +369 -0
- regscale/integrations/commercial/stig_mapper_integration/__init__.py +0 -0
- regscale/integrations/commercial/stig_mapper_integration/click_commands.py +38 -0
- regscale/integrations/commercial/stig_mapper_integration/mapping_engine.py +353 -0
- regscale/integrations/commercial/stigv2/__init__.py +0 -0
- regscale/integrations/commercial/stigv2/ckl_parser.py +349 -0
- regscale/integrations/commercial/stigv2/click_commands.py +95 -0
- regscale/integrations/commercial/stigv2/stig_integration.py +202 -0
- regscale/integrations/commercial/synqly/__init__.py +0 -0
- regscale/integrations/commercial/synqly/assets.py +46 -0
- regscale/integrations/commercial/synqly/ticketing.py +132 -0
- regscale/integrations/commercial/synqly/vulnerabilities.py +223 -0
- regscale/integrations/commercial/synqly_jira.py +840 -0
- regscale/integrations/commercial/tenablev2/__init__.py +0 -0
- regscale/integrations/commercial/tenablev2/authenticate.py +31 -0
- regscale/integrations/commercial/tenablev2/click.py +1584 -0
- regscale/integrations/commercial/tenablev2/scanner.py +504 -0
- regscale/integrations/commercial/tenablev2/stig_parsers.py +140 -0
- regscale/integrations/commercial/tenablev2/utils.py +78 -0
- regscale/integrations/commercial/tenablev2/variables.py +17 -0
- regscale/integrations/commercial/trivy.py +162 -0
- regscale/integrations/commercial/veracode.py +96 -0
- regscale/integrations/commercial/wizv2/WizDataMixin.py +97 -0
- regscale/integrations/commercial/wizv2/__init__.py +0 -0
- regscale/integrations/commercial/wizv2/click.py +429 -0
- regscale/integrations/commercial/wizv2/constants.py +1001 -0
- regscale/integrations/commercial/wizv2/issue.py +361 -0
- regscale/integrations/commercial/wizv2/models.py +112 -0
- regscale/integrations/commercial/wizv2/parsers.py +339 -0
- regscale/integrations/commercial/wizv2/sbom.py +115 -0
- regscale/integrations/commercial/wizv2/scanner.py +416 -0
- regscale/integrations/commercial/wizv2/utils.py +796 -0
- regscale/integrations/commercial/wizv2/variables.py +39 -0
- regscale/integrations/commercial/wizv2/wiz_auth.py +159 -0
- regscale/integrations/commercial/xray.py +91 -0
- regscale/integrations/integration/__init__.py +2 -0
- regscale/integrations/integration/integration.py +26 -0
- regscale/integrations/integration/inventory.py +17 -0
- regscale/integrations/integration/issue.py +100 -0
- regscale/integrations/integration_override.py +149 -0
- regscale/integrations/public/__init__.py +103 -0
- regscale/integrations/public/cisa.py +641 -0
- regscale/integrations/public/criticality_updater.py +70 -0
- regscale/integrations/public/emass.py +411 -0
- regscale/integrations/public/emass_slcm_import.py +697 -0
- regscale/integrations/public/fedramp/__init__.py +0 -0
- regscale/integrations/public/fedramp/appendix_parser.py +548 -0
- regscale/integrations/public/fedramp/click.py +479 -0
- regscale/integrations/public/fedramp/components.py +714 -0
- regscale/integrations/public/fedramp/docx_parser.py +259 -0
- regscale/integrations/public/fedramp/fedramp_cis_crm.py +1124 -0
- regscale/integrations/public/fedramp/fedramp_common.py +3181 -0
- regscale/integrations/public/fedramp/fedramp_docx.py +388 -0
- regscale/integrations/public/fedramp/fedramp_five.py +2343 -0
- regscale/integrations/public/fedramp/fedramp_traversal.py +138 -0
- regscale/integrations/public/fedramp/import_fedramp_r4_ssp.py +279 -0
- regscale/integrations/public/fedramp/import_workbook.py +495 -0
- regscale/integrations/public/fedramp/inventory_items.py +244 -0
- regscale/integrations/public/fedramp/mappings/__init__.py +0 -0
- regscale/integrations/public/fedramp/mappings/fedramp_r4_parts.json +7388 -0
- regscale/integrations/public/fedramp/mappings/fedramp_r5_params.json +8636 -0
- regscale/integrations/public/fedramp/mappings/fedramp_r5_parts.json +9605 -0
- regscale/integrations/public/fedramp/mappings/system_roles.py +34 -0
- regscale/integrations/public/fedramp/mappings/user.py +175 -0
- regscale/integrations/public/fedramp/mappings/values.py +141 -0
- regscale/integrations/public/fedramp/markdown_parser.py +150 -0
- regscale/integrations/public/fedramp/metadata.py +689 -0
- regscale/integrations/public/fedramp/models/__init__.py +59 -0
- regscale/integrations/public/fedramp/models/leveraged_auth_new.py +168 -0
- regscale/integrations/public/fedramp/models/poam_importer.py +522 -0
- regscale/integrations/public/fedramp/parts_mapper.py +107 -0
- regscale/integrations/public/fedramp/poam/__init__.py +0 -0
- regscale/integrations/public/fedramp/poam/scanner.py +851 -0
- regscale/integrations/public/fedramp/properties.py +201 -0
- regscale/integrations/public/fedramp/reporting.py +84 -0
- regscale/integrations/public/fedramp/resources.py +496 -0
- regscale/integrations/public/fedramp/rosetta.py +110 -0
- regscale/integrations/public/fedramp/ssp_logger.py +87 -0
- regscale/integrations/public/fedramp/system_characteristics.py +922 -0
- regscale/integrations/public/fedramp/system_control_implementations.py +582 -0
- regscale/integrations/public/fedramp/system_implementation.py +190 -0
- regscale/integrations/public/fedramp/xml_utils.py +87 -0
- regscale/integrations/public/nist_catalog.py +275 -0
- regscale/integrations/public/oscal.py +1946 -0
- regscale/integrations/public/otx.py +169 -0
- regscale/integrations/scanner_integration.py +2692 -0
- regscale/integrations/variables.py +25 -0
- regscale/models/__init__.py +7 -0
- regscale/models/app_models/__init__.py +5 -0
- regscale/models/app_models/catalog_compare.py +213 -0
- regscale/models/app_models/click.py +252 -0
- regscale/models/app_models/datetime_encoder.py +21 -0
- regscale/models/app_models/import_validater.py +321 -0
- regscale/models/app_models/mapping.py +260 -0
- regscale/models/app_models/pipeline.py +37 -0
- regscale/models/click_models.py +413 -0
- regscale/models/config.py +154 -0
- regscale/models/email_style.css +67 -0
- regscale/models/hierarchy.py +8 -0
- regscale/models/inspect_models.py +79 -0
- regscale/models/integration_models/__init__.py +0 -0
- regscale/models/integration_models/amazon_models/__init__.py +0 -0
- regscale/models/integration_models/amazon_models/inspector.py +262 -0
- regscale/models/integration_models/amazon_models/inspector_scan.py +206 -0
- regscale/models/integration_models/aqua.py +247 -0
- regscale/models/integration_models/azure_alerts.py +255 -0
- regscale/models/integration_models/base64.py +23 -0
- regscale/models/integration_models/burp.py +433 -0
- regscale/models/integration_models/burp_models.py +128 -0
- regscale/models/integration_models/cisa_kev_data.json +19333 -0
- regscale/models/integration_models/defender_data.py +93 -0
- regscale/models/integration_models/defenderimport.py +143 -0
- regscale/models/integration_models/drf.py +443 -0
- regscale/models/integration_models/ecr_models/__init__.py +0 -0
- regscale/models/integration_models/ecr_models/data.py +69 -0
- regscale/models/integration_models/ecr_models/ecr.py +239 -0
- regscale/models/integration_models/flat_file_importer.py +1079 -0
- regscale/models/integration_models/grype_import.py +247 -0
- regscale/models/integration_models/ibm.py +126 -0
- regscale/models/integration_models/implementation_results.py +85 -0
- regscale/models/integration_models/nexpose.py +140 -0
- regscale/models/integration_models/prisma.py +202 -0
- regscale/models/integration_models/qualys.py +720 -0
- regscale/models/integration_models/qualys_scanner.py +160 -0
- regscale/models/integration_models/sbom/__init__.py +0 -0
- regscale/models/integration_models/sbom/cyclone_dx.py +139 -0
- regscale/models/integration_models/send_reminders.py +620 -0
- regscale/models/integration_models/snyk.py +155 -0
- regscale/models/integration_models/synqly_models/__init__.py +0 -0
- regscale/models/integration_models/synqly_models/capabilities.json +1 -0
- regscale/models/integration_models/synqly_models/connector_types.py +22 -0
- regscale/models/integration_models/synqly_models/connectors/__init__.py +7 -0
- regscale/models/integration_models/synqly_models/connectors/assets.py +97 -0
- regscale/models/integration_models/synqly_models/connectors/ticketing.py +583 -0
- regscale/models/integration_models/synqly_models/connectors/vulnerabilities.py +169 -0
- regscale/models/integration_models/synqly_models/ocsf_mapper.py +331 -0
- regscale/models/integration_models/synqly_models/param.py +72 -0
- regscale/models/integration_models/synqly_models/synqly_model.py +733 -0
- regscale/models/integration_models/synqly_models/tenants.py +39 -0
- regscale/models/integration_models/tenable_models/__init__.py +0 -0
- regscale/models/integration_models/tenable_models/integration.py +187 -0
- regscale/models/integration_models/tenable_models/models.py +513 -0
- regscale/models/integration_models/trivy_import.py +231 -0
- regscale/models/integration_models/veracode.py +217 -0
- regscale/models/integration_models/xray.py +135 -0
- regscale/models/locking.py +100 -0
- regscale/models/platform.py +110 -0
- regscale/models/regscale_models/__init__.py +67 -0
- regscale/models/regscale_models/assessment.py +570 -0
- regscale/models/regscale_models/assessment_plan.py +52 -0
- regscale/models/regscale_models/asset.py +567 -0
- regscale/models/regscale_models/asset_mapping.py +190 -0
- regscale/models/regscale_models/case.py +42 -0
- regscale/models/regscale_models/catalog.py +261 -0
- regscale/models/regscale_models/cci.py +46 -0
- regscale/models/regscale_models/change.py +167 -0
- regscale/models/regscale_models/checklist.py +372 -0
- regscale/models/regscale_models/comment.py +49 -0
- regscale/models/regscale_models/compliance_settings.py +112 -0
- regscale/models/regscale_models/component.py +412 -0
- regscale/models/regscale_models/component_mapping.py +65 -0
- regscale/models/regscale_models/control.py +38 -0
- regscale/models/regscale_models/control_implementation.py +1128 -0
- regscale/models/regscale_models/control_objective.py +261 -0
- regscale/models/regscale_models/control_parameter.py +100 -0
- regscale/models/regscale_models/control_test.py +34 -0
- regscale/models/regscale_models/control_test_plan.py +75 -0
- regscale/models/regscale_models/control_test_result.py +52 -0
- regscale/models/regscale_models/custom_field.py +245 -0
- regscale/models/regscale_models/data.py +109 -0
- regscale/models/regscale_models/data_center.py +40 -0
- regscale/models/regscale_models/deviation.py +203 -0
- regscale/models/regscale_models/email.py +97 -0
- regscale/models/regscale_models/evidence.py +47 -0
- regscale/models/regscale_models/evidence_mapping.py +40 -0
- regscale/models/regscale_models/facility.py +59 -0
- regscale/models/regscale_models/file.py +382 -0
- regscale/models/regscale_models/filetag.py +37 -0
- regscale/models/regscale_models/form_field_value.py +94 -0
- regscale/models/regscale_models/group.py +169 -0
- regscale/models/regscale_models/implementation_objective.py +335 -0
- regscale/models/regscale_models/implementation_option.py +275 -0
- regscale/models/regscale_models/implementation_role.py +33 -0
- regscale/models/regscale_models/incident.py +177 -0
- regscale/models/regscale_models/interconnection.py +43 -0
- regscale/models/regscale_models/issue.py +1176 -0
- regscale/models/regscale_models/leveraged_authorization.py +125 -0
- regscale/models/regscale_models/line_of_inquiry.py +52 -0
- regscale/models/regscale_models/link.py +205 -0
- regscale/models/regscale_models/meta_data.py +64 -0
- regscale/models/regscale_models/mixins/__init__.py +0 -0
- regscale/models/regscale_models/mixins/parent_cache.py +124 -0
- regscale/models/regscale_models/module.py +224 -0
- regscale/models/regscale_models/modules.py +191 -0
- regscale/models/regscale_models/objective.py +14 -0
- regscale/models/regscale_models/parameter.py +87 -0
- regscale/models/regscale_models/ports_protocol.py +81 -0
- regscale/models/regscale_models/privacy.py +89 -0
- regscale/models/regscale_models/profile.py +50 -0
- regscale/models/regscale_models/profile_link.py +68 -0
- regscale/models/regscale_models/profile_mapping.py +124 -0
- regscale/models/regscale_models/project.py +63 -0
- regscale/models/regscale_models/property.py +278 -0
- regscale/models/regscale_models/question.py +85 -0
- regscale/models/regscale_models/questionnaire.py +87 -0
- regscale/models/regscale_models/questionnaire_instance.py +177 -0
- regscale/models/regscale_models/rbac.py +132 -0
- regscale/models/regscale_models/reference.py +86 -0
- regscale/models/regscale_models/regscale_model.py +1643 -0
- regscale/models/regscale_models/requirement.py +29 -0
- regscale/models/regscale_models/risk.py +274 -0
- regscale/models/regscale_models/sbom.py +54 -0
- regscale/models/regscale_models/scan_history.py +436 -0
- regscale/models/regscale_models/search.py +53 -0
- regscale/models/regscale_models/security_control.py +132 -0
- regscale/models/regscale_models/security_plan.py +204 -0
- regscale/models/regscale_models/software_inventory.py +159 -0
- regscale/models/regscale_models/stake_holder.py +64 -0
- regscale/models/regscale_models/stig.py +647 -0
- regscale/models/regscale_models/supply_chain.py +152 -0
- regscale/models/regscale_models/system_role.py +188 -0
- regscale/models/regscale_models/system_role_external_assignment.py +40 -0
- regscale/models/regscale_models/tag.py +37 -0
- regscale/models/regscale_models/tag_mapping.py +19 -0
- regscale/models/regscale_models/task.py +133 -0
- regscale/models/regscale_models/threat.py +196 -0
- regscale/models/regscale_models/user.py +175 -0
- regscale/models/regscale_models/user_group.py +55 -0
- regscale/models/regscale_models/vulnerability.py +242 -0
- regscale/models/regscale_models/vulnerability_mapping.py +162 -0
- regscale/models/regscale_models/workflow.py +55 -0
- regscale/models/regscale_models/workflow_action.py +34 -0
- regscale/models/regscale_models/workflow_instance.py +269 -0
- regscale/models/regscale_models/workflow_instance_step.py +114 -0
- regscale/models/regscale_models/workflow_template.py +58 -0
- regscale/models/regscale_models/workflow_template_step.py +45 -0
- regscale/regscale.py +815 -0
- regscale/utils/__init__.py +7 -0
- regscale/utils/b64conversion.py +14 -0
- regscale/utils/click_utils.py +118 -0
- regscale/utils/decorators.py +48 -0
- regscale/utils/dict_utils.py +59 -0
- regscale/utils/files.py +79 -0
- regscale/utils/fxns.py +30 -0
- regscale/utils/graphql_client.py +113 -0
- regscale/utils/lists.py +16 -0
- regscale/utils/numbers.py +12 -0
- regscale/utils/shell.py +148 -0
- regscale/utils/string.py +121 -0
- regscale/utils/synqly_utils.py +165 -0
- regscale/utils/threading/__init__.py +8 -0
- regscale/utils/threading/threadhandler.py +131 -0
- regscale/utils/threading/threadsafe_counter.py +47 -0
- regscale/utils/threading/threadsafe_dict.py +242 -0
- regscale/utils/threading/threadsafe_list.py +83 -0
- regscale/utils/version.py +104 -0
- regscale/validation/__init__.py +0 -0
- regscale/validation/address.py +37 -0
- regscale/validation/record.py +48 -0
- regscale/visualization/__init__.py +5 -0
- regscale/visualization/click.py +34 -0
- regscale_cli-6.16.0.0.dist-info/LICENSE +21 -0
- regscale_cli-6.16.0.0.dist-info/METADATA +659 -0
- regscale_cli-6.16.0.0.dist-info/RECORD +481 -0
- regscale_cli-6.16.0.0.dist-info/WHEEL +5 -0
- regscale_cli-6.16.0.0.dist-info/entry_points.txt +6 -0
- regscale_cli-6.16.0.0.dist-info/top_level.txt +2 -0
- tests/fixtures/__init__.py +2 -0
- tests/fixtures/api.py +87 -0
- tests/fixtures/models.py +91 -0
- tests/fixtures/test_fixture.py +144 -0
- tests/mocks/__init__.py +0 -0
- tests/mocks/objects.py +3 -0
- tests/mocks/response.py +32 -0
- tests/mocks/xml.py +13 -0
- tests/regscale/__init__.py +0 -0
- tests/regscale/core/__init__.py +0 -0
- tests/regscale/core/test_api.py +232 -0
- tests/regscale/core/test_app.py +406 -0
- tests/regscale/core/test_login.py +37 -0
- tests/regscale/core/test_logz.py +66 -0
- tests/regscale/core/test_sbom_generator.py +87 -0
- tests/regscale/core/test_validation_utils.py +163 -0
- tests/regscale/core/test_version.py +78 -0
- tests/regscale/models/__init__.py +0 -0
- tests/regscale/models/test_asset.py +71 -0
- tests/regscale/models/test_config.py +26 -0
- tests/regscale/models/test_control_implementation.py +27 -0
- tests/regscale/models/test_import.py +97 -0
- tests/regscale/models/test_issue.py +36 -0
- tests/regscale/models/test_mapping.py +52 -0
- tests/regscale/models/test_platform.py +31 -0
- tests/regscale/models/test_regscale_model.py +346 -0
- tests/regscale/models/test_report.py +32 -0
- tests/regscale/models/test_tenable_integrations.py +118 -0
- tests/regscale/models/test_user_model.py +121 -0
- tests/regscale/test_about.py +19 -0
- tests/regscale/test_authorization.py +65 -0
|
@@ -0,0 +1,1701 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# -*- coding: utf-8 -*-
|
|
3
|
+
# pylint: disable=C0302
|
|
4
|
+
"""Module to allow user to make changes to certain models in an Excel
|
|
5
|
+
spreadsheet for user-friendly experience"""
|
|
6
|
+
|
|
7
|
+
# standard python imports
|
|
8
|
+
import logging
|
|
9
|
+
from typing import TYPE_CHECKING
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
import pandas as pd # Type Checking
|
|
13
|
+
|
|
14
|
+
import math
|
|
15
|
+
import os
|
|
16
|
+
import shutil
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from typing import Optional, Union, Any
|
|
19
|
+
from operator import attrgetter
|
|
20
|
+
|
|
21
|
+
import click
|
|
22
|
+
from openpyxl import Workbook, load_workbook
|
|
23
|
+
from openpyxl.styles import Protection, Font, NamedStyle
|
|
24
|
+
from openpyxl.worksheet.worksheet import Worksheet
|
|
25
|
+
from openpyxl.worksheet.datavalidation import DataValidation
|
|
26
|
+
from pydantic.fields import FieldInfo
|
|
27
|
+
|
|
28
|
+
from regscale.core.app.api import Api
|
|
29
|
+
from regscale.core.app.application import Application
|
|
30
|
+
from regscale.core.app.logz import create_logger
|
|
31
|
+
from regscale.core.app.utils.app_utils import (
|
|
32
|
+
check_file_path,
|
|
33
|
+
error_and_exit,
|
|
34
|
+
get_user_names,
|
|
35
|
+
check_empty_nan,
|
|
36
|
+
)
|
|
37
|
+
from regscale.models.app_models.click import regscale_id, regscale_module
|
|
38
|
+
from regscale.models.regscale_models.facility import Facility
|
|
39
|
+
from regscale.models.regscale_models.assessment import Assessment
|
|
40
|
+
from regscale.models.regscale_models.modules import Modules
|
|
41
|
+
from regscale.models.regscale_models.control import Control
|
|
42
|
+
from regscale.models.regscale_models.control_implementation import ControlImplementation
|
|
43
|
+
from regscale.models.regscale_models.issue import Issue
|
|
44
|
+
from regscale.models.regscale_models.asset import Asset
|
|
45
|
+
from regscale.models.regscale_models.component import Component, ComponentType
|
|
46
|
+
from regscale.models.regscale_models.risk import Risk
|
|
47
|
+
|
|
48
|
+
# Task should be included, but doesn't have a model in
|
|
49
|
+
# regscale.models.regscale_models yet.
|
|
50
|
+
# from regscale.models.regscale_models
|
|
51
|
+
|
|
52
|
+
# Prefixes/suffixes used when composing worksheet titles and workbook file names
# (e.g. "New_Assessments" / "New_Assessments.xlsx").
ALL_PRE = "All_"
NEW_PRE = "New_"
OLD_PRE = "Old_"
FILE_POST = "s.xlsx"
# File where detected differences between old/new workbooks are written.
DIFFERENCES_FILE = "differences.txt"
# Prompt/error strings surfaced to the user through Excel data-validation popups.
SELECT_PROMPT = "Please select an option from the dropdown list."
DATE_ENTRY_PROMPT = "Please enter a valid date in the following format: mm/dd/yyyy"
SELECTION_ERROR = "Your entry is not one of the available options."
INVALID_ENTRY_ERROR = "Your entry is not a valid option."
INVALID_ENTRY_TITLE = "Invalid Entry"

# Shared application logger (named channel, configured elsewhere by the CLI).
logger = logging.getLogger("regscale")

# Model fields that are system-managed and therefore excluded from the
# user-editable spreadsheet columns.
exclude_fields = [
    "uuid",
    "createdBy",
    "createdById",
    "lastUpdatedBy",
    "lastUpdatedById",
    "dateLastUpdated",
    "dateCreated",
]
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
# pylint: disable=R0902,R0903
class FieldMakeup:
    """
    Metadata about a single field of the model being processed.

    Tracks how the field should be rendered in the generated Excel workbook:
    its column header, data type, sort position, dropdown (enum) choices,
    date handling, and whether it is required.
    """

    def __init__(self, field_name: str, col_name: str, data_type: str):
        """
        :param str field_name: name of the field on the model
        :param str col_name: column header to show in the spreadsheet
        :param str data_type: string name of the field's data type (e.g. "bool")
        """
        self.field_name = field_name
        self.column_name = col_name
        self.data_type = data_type
        self.sort_order = 0  # column position; assigned later during layout
        self.lookup_field = ""  # field on the lookup model used for dropdowns
        self.enum_values = []  # dropdown choices, if any
        self.treat_as_date = False  # apply date formatting/validation
        self.cell_col = ""  # Excel column letter once assigned
        self.required = False  # whether the user must supply a value
        self.treat_enum_as_lookup = False  # render enum via a lookup sheet
        # BUG FIX: __post_init__ is a dataclasses-only hook; on a plain class
        # it is never invoked automatically, so boolean fields previously
        # never received their TRUE/FALSE dropdown values. Call it explicitly.
        self.__post_init__()

    def __post_init__(self):
        """Populate the TRUE/FALSE dropdown choices for boolean fields."""
        if self.data_type == "bool":
            self.enum_values = ["TRUE", "FALSE"]


# pylint: enable=R0902,R0903
|
|
100
|
+
|
|
101
|
+
# Module-level mutable state shared by the workbook build/ingest helpers.
# obj_fields: FieldMakeup entries for the model currently being processed;
# include_fields: subset of field names to expose in the spreadsheet.
# Both are populated elsewhere in this module — TODO confirm against callers.
obj_fields = []
include_fields = []

# Presumably a cache of lookup data (pandas DataFrames, given the
# TYPE_CHECKING import of pandas) keyed by lookup name — populated elsewhere
# in this module; verify against usage before relying on this.
lookup_dfs = {}
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
@click.group(name="model")
def model():
    """
    Perform actions on CLI models, such as creating or updating records in RegScale.
    """
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
# Make Empty Spreadsheet for creating new assessments.
@model.command(name="new")
@click.option(
    "--path",
    type=click.Path(exists=False, dir_okay=True, path_type=Path),
    help="Provide the desired path for excel files to be generated into.",
    default=os.path.join(os.getcwd(), "artifacts"),
    required=True,
)
@click.option(
    "--model",
    type=click.Choice(
        [
            "assessment",
            "issue",
            "component",
            "asset",
        ],  # , 'risk'], #, 'task'],
        case_sensitive=False,
    ),
    help="Specify the type of new bulk load file to create.",
    default="assessment",
    required=True,
)
def generate_new_file(path: Path, model: str) -> None:
    """This function will build an Excel spreadsheet for users to be
    able to create new assessments.

    :param Path path: directory where the workbook will be generated
    :param str model: type of model workbook to create
    :return: None
    :rtype: None
    """
    # Thin CLI wrapper: all the work happens in new_file().
    new_file(path, model)
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
# pylint: disable=W0612
def new_file(path: Path, obj_type: str) -> None:
    """
    Build an empty Excel workbook used to create new records of the given model type.

    :param Path path: directory of file location
    :param str obj_type: type of new spreadsheet to create
    :return: None
    :rtype: None
    """
    check_file_path(path)

    # Resolve the model implementation; bail out early when the model type
    # does not support creating records through Excel.
    model_obj = get_obj(obj_type)  # noqa F841
    if not model_obj.is_new_excel_record_allowed():
        logger.warning("Creating new records for this model type in Excel spreadsheets are not allowed.")
        return

    # Derive the sheet title and file name, then create the workbook shell.
    sheet_title = get_workbook_title(obj_type, NEW_PRE, "s")
    workbook_filename = get_workbook_title(obj_type, NEW_PRE, FILE_POST)
    build_workbook(path, workbook_filename, sheet_title)  # noqa F841

    logger.info(f"Your excel workbook has been created. Please open {workbook_filename} and add new {obj_type}s.")


# pylint: enable=W0612
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
@model.command(name="generate")
@regscale_id()
@regscale_module()
@click.option(
    "--path",
    type=click.Path(exists=False, dir_okay=True, path_type=Path),
    help="Provide the desired path for excel files to be generated into.",
    default=os.path.join(os.getcwd(), "artifacts"),
    required=True,
)
@click.option(
    "--model",
    type=click.Choice(
        [
            "assessment",
            "control",
            "issue",
            "component",
            "asset",
        ],  # TODO: 'risk' and 'task' models not yet supported
        case_sensitive=False,
    ),
    help="Specify the type of bulk load file to generate.",
    default="assessment",
    required=True,
)
def generate(regscale_id: int, regscale_module: str, path: Path, model: str):
    """
    This function will build and populate a spreadsheet of all records of the selected
    model type with the selected RegScale Parent Id and RegScale Module for users to
    make any necessary edits.
    """
    all_of_model(parent_id=regscale_id, parent_module=regscale_module, path=path, obj_type=model)
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def all_of_model(parent_id: int, parent_module: str, path: Path, obj_type: str) -> None:
    """
    This function will pull all records of the type specified for the parent ID and module
    specified, populate an Excel spreadsheet with the data, and save it in the specified
    folder for the user to edit as appropriate.

    :param int parent_id: RegScale Parent Id
    :param str parent_module: RegScale Parent Module
    :param Path path: directory of file location
    :param str obj_type: The model type to download
    :return: None
    :rtype: None
    """
    import pandas as pd  # Optimize import performance

    def _append_df_to_sheet(df: "pd.DataFrame", file_path: str, sheet_name: str) -> None:
        """Append the DataFrame onto the named sheet of an existing workbook."""
        with pd.ExcelWriter(
            file_path,
            mode="a",
            engine="openpyxl",
            if_sheet_exists="overlay",
        ) as writer:
            df.to_excel(
                writer,
                sheet_name=sheet_name,
                index=False,
            )

    app = Application()

    # get model specified
    obj = get_obj(obj_type)
    if obj.use_query():
        existing_data = get_all_by_query(obj, parent_id, parent_module, app)
    else:
        existing_data = get_all_by_parent(obj_type, parent_id, parent_module)
    logger.debug(existing_data)
    if len(existing_data) > 0:
        match_fields_to_data(existing_data)
        check_file_path(path)
        # BUGFIX: the original f-string was missing the closing parenthesis.
        workbook_title = get_workbook_title(obj_type, "", f"({parent_id}_{parent_module})")
        workbook_filename = get_workbook_title(obj_type, ALL_PRE, FILE_POST)
        old_workbook_filename = get_workbook_title(obj_type, OLD_PRE, FILE_POST)
        build_workbook(path, workbook_filename, workbook_title)
        # Keep a pre-change copy so later loads can diff against it.
        shutil.copy(
            os.path.join(path, workbook_filename),
            os.path.join(path, old_workbook_filename),
        )
        all_df = put_data_into_df(existing_data)
        # Write the same data into the editable workbook and the baseline copy
        # (previously two duplicated ExcelWriter blocks).
        _append_df_to_sheet(all_df, os.path.join(path, workbook_filename), workbook_title)
        _append_df_to_sheet(all_df, os.path.join(path, old_workbook_filename), workbook_title)

        # Lock the pre-change copy so users cannot edit the baseline.
        workbook2 = load_workbook(os.path.join(path, old_workbook_filename))
        worksheet2 = workbook2.active
        worksheet2.protection.sheet = True
        workbook2.save(filename=os.path.join(path, old_workbook_filename))

        # Re-open and re-save the editable workbook.
        workbook = load_workbook(os.path.join(path, workbook_filename))

        workbook.save(filename=os.path.join(path, workbook_filename))
        logger.info(f"Your excel workbook has been created. Please open {workbook_filename} and add new {obj_type}s.")

    else:
        app.logger.info("Please check your selections for RegScale Id and RegScale Module and try again.")
        error_and_exit(
            "There was an error creating your workbook. No "
            + obj_type
            + " exist for the given RegScale Id and RegScale Module."
        )
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
@model.command(name="load")
@click.option(
    "--path",
    type=click.Path(exists=False, dir_okay=True, path_type=Path),
    help="Provide the desired path of excel workbook locations.",
    default=os.path.join(os.getcwd(), "artifacts"),
    required=True,
)
@click.option(
    "--model",
    type=click.Choice(
        [
            "assessment",
            "control",
            "issue",
            "component",
            "asset",
        ],  # TODO: 'risk' and 'task' models not yet supported
        case_sensitive=False,
    ),
    help="Specify the type of bulk load file to load.",
    default="assessment",
    required=True,
)
def load(path: Path, model: str) -> None:
    """
    This function uploads updated and new records of the selected model type to
    RegScale from the Excel files that users have edited.
    """
    upload_data(path=path, obj_type=model)
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
# pylint: disable=R0914
def upload_data(path: Path, obj_type: str) -> None:
    """
    Function will upload records to RegScale if the user has made edits to any
    of the model excel workbooks

    :param Path path: directory of file location
    :param str obj_type: The model type to download
    :return: None
    :rtype: None
    """
    import numpy as np  # Optimize import performance
    import pandas as pd

    app = Application()
    api = Api()

    # get model specified - This is to populate the obj_fields structure
    obj = get_obj(obj_type)
    if not obj:
        app.logger.error("Unable to instantiate an object of type {}".format(obj_type))
        # BUGFIX: the original logged the error and then kept going with an
        # unusable model type; stop here instead.
        return
    all_workbook_filename = get_workbook_title(obj_type, ALL_PRE, FILE_POST)
    old_workbook_filename = get_workbook_title(obj_type, OLD_PRE, FILE_POST)
    new_workbook_filename = get_workbook_title(obj_type, NEW_PRE, FILE_POST)
    # New records live in their own workbook and are posted first.
    if os.path.isfile(os.path.join(path, new_workbook_filename)):
        upload_new_data(app, path, obj_type, new_workbook_filename)
    else:
        app.logger.info("No new " + obj_type + " detected. Checking for edited " + obj_type + "s.")

    if os.path.isfile(os.path.join(path, all_workbook_filename)):
        # The protected pre-change copy is the baseline for the diff.
        if not os.path.isfile(os.path.join(path, old_workbook_filename)):
            return app.logger.error("Missing pre-change copy file, unable to determine if changes were made. Aborting!")
        df1 = pd.read_excel(os.path.join(path, old_workbook_filename), sheet_name=0, index_col="Id")

        df2 = pd.read_excel(os.path.join(path, all_workbook_filename), sheet_name=0, index_col="Id")

        if df1.equals(df2):
            error_and_exit("No differences detected.")

        else:
            app.logger.warning("Differences found!")
            # Need to strip out any net new rows before doing this comparison
            df2 = strip_any_net_new_rows(app, df2, all_workbook_filename, obj_type, path, new_workbook_filename)
            # A cell counts as changed when values differ and are not both NaN.
            diff_mask = (df1 != df2) & ~(df1.isnull() & df2.isnull())
            ne_stacked = diff_mask.stack()
            changed = ne_stacked[ne_stacked]
            changed.index.names = ["Id", "Column"]
            difference_locations = np.nonzero(diff_mask)
            changed_from = df1.values[difference_locations]
            changed_to = df2.values[difference_locations]
            changes = pd.DataFrame({"From": changed_from, "To": changed_to}, index=changed.index)
            # Persist a human-readable change log; upload_existing_data reads it back.
            changes.to_csv(
                os.path.join(path, DIFFERENCES_FILE),
                header=True,
                index=True,
                sep=" ",
                mode="w+",
            )
            app.logger.info(
                "Please check differences.txt file located in %s to see changes made.",
                path,
            )
            upload_existing_data(app, api, path, obj_type, all_workbook_filename)
    else:
        app.logger.info("No files found for the specified type to load to RegScale.")
    return app.logger.info(
        obj_type + " files have been uploaded. Changes made to existing files can be seen in "
        "differences.txt file. Thank you!"
    )


# pylint: enable=R0914
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
@model.command(name="delete_files")
@click.option(
    "--path",
    type=click.Path(exists=False, dir_okay=True, path_type=Path),
    help="Provide the desired path of file location.",
    default=Path("./artifacts"),
    required=True,
)
@click.option(
    "--model",
    type=click.Choice(
        [
            "assessment",
            "control",
            "issue",
            "component",
            "asset",
        ],  # TODO: 'risk' and 'task' models not yet supported
        case_sensitive=False,
    ),
    help="Specify the type of bulk load file to delete.",
    default="assessment",
    required=True,
)
def generate_delete_file(path: Path, model: str):
    """This command will delete the working files used during the editing process for the selected model type."""
    delete_file(path, model)
|
|
421
|
+
|
|
422
|
+
|
|
423
|
+
def delete_file(path: Path, obj_type: str) -> int:
    """
    Deletes files used during the process

    :param Path path: directory of file location
    :param str obj_type: The model type to download
    :return: Number of files deleted
    :rtype: int
    """
    log = create_logger()
    # Working files produced by the new/generate/load flow, plus the diff log.
    candidates = [
        get_workbook_title(obj_type, NEW_PRE, FILE_POST),
        get_workbook_title(obj_type, ALL_PRE, FILE_POST),
        get_workbook_title(obj_type, OLD_PRE, FILE_POST),
        DIFFERENCES_FILE,
    ]
    deleted_files = []

    for file_name in candidates:
        target = path / file_name
        if os.path.isfile(target):
            os.remove(target)
            deleted_files.append(file_name)
        else:
            log.warning("No %s file found. Checking for other files before exiting.", file_name)
    log.info("%i file(s) have been deleted: %s", len(deleted_files), ", ".join(deleted_files))
    return len(deleted_files)
|
|
452
|
+
|
|
453
|
+
|
|
454
|
+
def upload_new_data(app: Application, path: Path, obj_type: str, workbook_filename: str) -> None:
    """
    This method reads in the spreadsheet filled with new records to upload into RegScale, converts
    them into the appropriate object type, and saves them in RegScale.

    :param Application app: The Application instance
    :param Path path: The path where the Excel file can be found
    :param str obj_type: The model type to load the records as
    :param str workbook_filename: The file name of the Excel spreadsheet
    :return: None
    :rtype: None
    """
    # Read the workbook into a dict of rows, convert each row into a model
    # instance, then post the batch and write the new ids back to the file.
    workbook_path = os.path.join(path, workbook_filename)
    records = map_workbook_to_dict(workbook_path)
    models = convert_dict_to_model(records, obj_type)
    post_and_save_models(app, models, path, obj_type, workbook_filename)
|
|
470
|
+
|
|
471
|
+
|
|
472
|
+
def strip_any_net_new_rows(
    app: Application, df: "pd.DataFrame", workbook_filename: str, obj_type: str, path: Path, new_workbook_filename: str
) -> "pd.DataFrame":
    """
    This method scans the loaded workbook for any new rows and strips them out to insert separately.

    :param Application app: The Application instance
    :param pd.DataFrame df: The DataFrame from the loaded workbook
    :param str workbook_filename: The file name of the Excel spreadsheet
    :param str obj_type: The model type to load the records as
    :param Path path: The path where the Excel file can be found
    :param str new_workbook_filename: The file name of the Excel spreadsheet with new records.
    :return: pd.DataFrame The updated DataFrame, minus any new rows
    :rtype: pd.DataFrame
    """
    import pandas as pd  # Optimize import performance

    df_updates = []
    df_inserts = []
    indexes = []
    columns = extract_columns_from_dataframe(df)
    obj = get_obj(obj_type)
    for x in df.index:
        # The index is the "Id" column; a NaN index means the user typed a
        # brand-new row (no Id), so it is converted to a model for insertion.
        if math.isnan(x):
            data_rec = {}
            for y in columns:
                data_rec[y] = df.at[x, y]
            df_inserts.append(convert_new_record_to_model(data_rec, obj_type, path, workbook_filename))
        else:
            # Existing record: keep the row (and its Id) for the update frame.
            indexes.append(x)
            data_rec = []
            for y in columns:
                data_rec.append(df.at[x, y])
            df_updates.append(data_rec)
    # Rebuild the DataFrame containing only pre-existing rows.
    new_df = pd.DataFrame(df_updates, index=indexes, columns=columns)
    if len(df_inserts) > 0:
        if obj.is_new_excel_record_allowed():
            # Insert the net-new rows immediately, separate from the diff/update path.
            post_and_save_models(app, df_inserts, path, obj_type, new_workbook_filename)
        else:
            app.logger.warning(
                "New rows have been found in the Excel spreadsheet being loaded. New records for this model are not allowed."
            )

    return new_df
|
|
516
|
+
|
|
517
|
+
|
|
518
|
+
def extract_columns_from_dataframe(df: "pd.DataFrame") -> list:
    """
    Builds a list of the columns in the dataframe.

    :param pd.DataFrame df: the DataFrame whose column labels are wanted
    :return: list of column names
    :rtype: list
    """
    # list() is the idiomatic replacement for the identity comprehension (C416).
    return list(df.columns)
|
|
527
|
+
|
|
528
|
+
|
|
529
|
+
def convert_new_record_to_model(data_rec: dict, obj_type: str, path: Path, workbook_filename: str) -> object:
    """
    This method takes the new record found in the Excel file of existing records, and converts it
    into a model object for inserting into the database.

    :param dict data_rec: The new record data extracted from the Excel file
    :param str obj_type: The model type to load the records as
    :param Path path: The path where the Excel file can be found
    :param str workbook_filename: The file name of the Excel spreadsheet
    :return: object
    :rtype: object
    :raises ValueError: when a required lookup column has no value selected
    """
    new_obj = {}
    for cur_field in obj_fields:
        new_obj[cur_field.field_name] = get_basic_field_value(cur_field, data_rec)
        new_obj[cur_field.field_name] = format_loaded_field_value(cur_field, new_obj[cur_field.field_name])
        if len(cur_field.lookup_field) > 0:
            match_value = new_obj[cur_field.field_name]
            # BUGFIX: use short-circuiting `and` rather than bitwise `&` for the
            # required-value check.
            if match_value is None and cur_field.required:
                message = f"{cur_field.column_name}: No value selected in new row. Please select a value from the list."
                raise ValueError(message)
            # Resolve the human-readable selection to its id via the lookup sheet.
            workbook_path = os.path.join(path, workbook_filename)
            new_obj[cur_field.field_name] = lookup_value_in_sheet(
                workbook_path, cur_field.lookup_field, match_value, cur_field.column_name
            )
        if new_obj[cur_field.field_name] is None:
            # Required-but-empty fields fall back to a type-appropriate default.
            new_obj[cur_field.field_name] = generate_default_value_for_field(cur_field.field_name, cur_field.data_type)
        elif cur_field.data_type == "str":
            if not isinstance(new_obj[cur_field.field_name], str):
                new_obj[cur_field.field_name] = str(new_obj[cur_field.field_name])
    return cast_dict_as_model(new_obj, obj_type)
|
|
562
|
+
|
|
563
|
+
|
|
564
|
+
def generate_default_value_for_field(field_name: str, data_type: str) -> Any:
    """
    Generate a default value for a required field.

    :param str field_name: Name of the field to generate a default value for
    :param str data_type: the data type to generate a default value for
    :return Any: the default value to use when creating a new record
    :rtype Any:
    """
    # The primary key gets 0 so the server treats the record as new; any other
    # "...Id" reference field is deliberately left unset.
    if field_name == "id":
        return 0
    if "Id" in field_name:
        return None
    # Zero value per type; unknown types fall through to None.
    type_defaults = {"int": 0, "bool": False, "str": "", "float": 0.0}
    return type_defaults.get(data_type)
|
|
585
|
+
|
|
586
|
+
|
|
587
|
+
# pylint: disable=E1136,R0914
def upload_existing_data(app: Application, api: Api, path: Path, obj_type: str, workbook_filename: str) -> None:
    """
    This method reads in the spreadsheet filled with existing records to update in RegScale.

    :param Application app: The Application instance
    :param Api api: The instance api handler
    :param Path path: The path where the Excel file can be found
    :param str obj_type: The model type to load the records as
    :param str workbook_filename: The file name of the Excel spreadsheet
    :return: None
    :rtype: None
    """
    import pandas as pd  # Optimize import performance

    obj = get_obj(obj_type)
    # Loading in differences.txt file and using Id to parse xlsx file for rows to update

    diff = pd.read_csv(os.path.join(path, DIFFERENCES_FILE), header=0, sep=" ", index_col=None)
    ids = []

    changes = []
    # Each diff row describes one changed cell: (record id, column label, new value).
    for _, row in diff.iterrows():
        row_chgs = {}
        ids.append(row["Id"])
        row_chgs["id"] = row["Id"]
        row_chgs["column"] = row["Column"]
        row_chgs["value"] = row["To"]
        changes.append(row_chgs)

    logger.debug(changes)
    # De-duplicate the ids so each record is processed only once.
    id_df = pd.DataFrame(ids, index=None, columns=["Id"])
    id_df2 = id_df.drop_duplicates()
    updated_files = os.path.join(path, workbook_filename)
    df3 = pd.read_excel(updated_files, sheet_name=0, index_col=None)
    # Keep only the workbook rows whose Id appears in the diff.
    updated = df3[df3["Id"].isin(id_df2["Id"])]
    updated = map_workbook_to_dict(updated_files, updated)
    config = app.config
    # Fetch the current server-side records, apply the cell-level changes, then PUT.
    load_objs = load_model_for_id(api, updated, config["domain"] + obj.get_endpoint("get"))
    load_data = []
    for cur_obj in load_objs:
        cur_obj_dict = find_and_apply_changes(cur_obj, changes, updated)
        load_data.append(cur_obj_dict)
    api.update_server(
        url=config["domain"] + obj.get_endpoint("insert"),
        json_list=load_data,
        message="Working on uploading updated " + obj_type + " to RegScale.",
        config=config,
        method="put",
    )


# pylint: enable=E1136,R0914
|
|
640
|
+
|
|
641
|
+
|
|
642
|
+
def find_and_apply_changes(cur_object: dict, changes: list, updates: dict) -> dict:
    """
    This method looks through the changes and applies those that should be applied to
    the current object.

    :param dict cur_object: the current object being updated
    :param list changes: a list of the specific changes to apply
    :param dict updates: a dictionary of updated models to be applied to the current object(s)
    :return: dict the updated object
    :rtype: dict
    """
    for change in changes:
        # Skip changes that belong to a different record.
        if change["id"] != cur_object["id"]:
            continue
        field_def = get_field_def_for_column(change["column"])
        if len(field_def.lookup_field) > 0:
            # Lookup-backed column: pull the resolved value out of the updates dict.
            resolved = extract_update_for_column(field_def.field_name, change["id"], updates)
            cur_object[field_def.field_name] = check_empty_nan(resolved)
        else:
            # Plain column: apply the new cell value directly.
            cur_object[get_field_name_for_column(change["column"])] = check_empty_nan(change["value"])
    return cur_object
|
|
663
|
+
|
|
664
|
+
|
|
665
|
+
def extract_update_for_column(field_name: str, rec_id: int, updates: dict) -> Any:
    """
    This method will look through the updated record dictionary and extract the updated
    value for the field being updated.

    :param str field_name: The name of the field to get the updated value for
    :param int rec_id: The id of the record to be updated
    :param dict updates: a dictionary of updates
    :return Any: The updated model to be applied to the database
    :rtype Any:
    """
    for cur_update in updates.values():
        # BUGFIX: use short-circuiting `and` instead of bitwise `&`; the original
        # evaluated cur_update["Id"] even when the "Id" key was missing, raising
        # KeyError instead of skipping the entry.
        if "Id" in cur_update and field_name in cur_update and cur_update["Id"] == rec_id:
            return cur_update[field_name]
    return None
|
|
682
|
+
|
|
683
|
+
|
|
684
|
+
def get_field_name_for_column(column_label: str) -> str:
    """
    This method iterates through the fields and finds the matching column.

    :param str column_label: The column label to find the field name for
    :return: str the field name to use
    :rtype: str
    """
    # Return the first field whose column matches; "" when nothing matches.
    return next(
        (field.field_name for field in obj_fields if field.column_name == column_label),
        "",
    )
|
|
696
|
+
|
|
697
|
+
|
|
698
|
+
def get_field_def_for_column(column_label: str) -> Any:
    """
    This method iterates through the fields and finds the matching column,
    then returns the column configuration build from the model definition.

    :param str column_label: The column label to find the field definition for
    :return Any: the field definition
    :rtype Any:
    """
    # Return the first matching field definition; None when nothing matches.
    return next(
        (field for field in obj_fields if field.column_name == column_label),
        None,
    )
|
|
711
|
+
|
|
712
|
+
|
|
713
|
+
# pylint: disable=R0913
def post_and_save_models(
    app: Application,
    new_models: list,
    workbook_path: Path,
    obj_type: str,
    load_file_name: str,
) -> None:
    """
    Function to post new records to RegScale and save record ids to excel workbook

    :param Application app: RegScale CLI Application object
    :param list new_models: List of new records to post to RegScale
    :param Path workbook_path: Path to workbook to save assessment ids to
    :param str obj_type: the model type to upload
    :param str load_file_name: The file name of the Excel file to update with record IDs
    :return: None
    :rtype: None
    """
    import pandas as pd  # Optimize import performance

    try:
        new_objs = []
        for cur_obj in new_models:
            # Create the record server-side, then any dependent/connecting records.
            new_obj = cur_obj.create()
            cur_obj.create_new_connecting_model(new_obj)
            new_objs.append(cur_obj)
        # Write the ids of the newly created records back into the workbook on
        # a dedicated "<obj_type>_Ids" sheet.
        new_objs_df = pd.DataFrame([obj.id for obj in new_objs], columns=["id_number"])
        for file_name in [load_file_name]:
            with pd.ExcelWriter(
                os.path.join(workbook_path, file_name),
                mode="a",
                engine="openpyxl",
                if_sheet_exists="overlay",
            ) as writer:
                new_objs_df.to_excel(
                    writer,
                    sheet_name=obj_type + "_Ids",
                    index=False,
                )
        app.logger.info(
            "%i total " + obj_type + "(s) were added to RegScale.",
            len(new_objs),
        )
    except Exception as e:
        # NOTE(review): broad best-effort handler — a failure mid-batch is only
        # logged, not re-raised, and partially created records are not rolled back.
        app.logger.error(e)


# pylint: enable=R0913
|
|
762
|
+
|
|
763
|
+
|
|
764
|
+
def map_pandas_timestamp(date_time: "pd.Timestamp") -> Optional[str]:
    """
    Function to map pandas timestamp to string

    :param pd.Timestamp date_time: the value to normalize; may also be None, NaT,
        NaN, a float, or an already-formatted string
    :return: String representation of pandas timestamp
    :rtype: Optional[str]
    """
    import pandas as pd  # Optimize import performance

    # Missing values (None/NaT/NaN) and raw floats carry no usable datetime.
    # (The original also re-checked `is not None`/`isna` after these early
    # returns — those conditions were always true by then and have been removed.)
    if pd.isnull(date_time) or isinstance(date_time, float):
        return None
    # Strings pass through untouched, with "" normalized to None.
    if isinstance(date_time, str):
        return date_time or None
    # Anything else is datetime-like and gets formatted.
    return date_time.strftime("%Y-%m-%d %H:%M:%S")
|
|
781
|
+
|
|
782
|
+
|
|
783
|
+
def load_model_for_id(api: Api, wb_data: dict, url: str) -> list:
    """
    This method loads the current record for the updated objects.

    :param Api api: the API object instance to use
    :param dict wb_data: The submitted workbook data in a dict
    :param str url: the base url to use to retrieve the model data
    :return: list of instances of the specified model, populated with the dict
    :rtype: list
    """
    fetched = []
    for record in wb_data.values():
        record_id = int(record["Id"])
        # Only persisted records (positive ids) can be fetched from the server.
        if record_id <= 0:
            continue
        target_url = check_url_for_double_slash(url.replace("{id}", str(record_id)))
        response = api.get(target_url)
        if response.status_code == 200:
            fetched.append(response.json())
    return fetched
|
|
804
|
+
|
|
805
|
+
|
|
806
|
+
def check_url_for_double_slash(url: str) -> str:
    """
    This method checks URLs for a double slash in the wrong place.

    Collapses "//" sequences in the part after the scheme while leaving the
    "://" separator intact.

    :param str url: the base url to be checked for double slash characters
    :return str: the url without double slashes
    :rtype str:
    """
    sep = url.find("://")
    # BUGFIX: the original assumed "://" was always present; when find()
    # returned -1 the string was split at index 2, which could leave a "//"
    # straddling the split point uncollapsed (e.g. "a//b" stayed "a//b").
    if sep == -1:
        return url.replace("//", "/")
    protocol_part = url[: sep + 3]
    remainder = url[sep + 3 :].replace("//", "/")
    return protocol_part + remainder
|
|
818
|
+
|
|
819
|
+
|
|
820
|
+
def convert_dict_to_model(wb_data: dict, obj_type: str) -> list:
    """
    This method converts the workbook dict to match what the model expects and then
    casts the resulting dict as an instance of the specified model.

    :param dict wb_data: The submitted workbook data in a dict
    :param str obj_type: The model type to instantiate
    :return: list instance of the specified model, populated with the dict
    :rtype: list
    """
    models = []
    for row_data in wb_data.values():
        record = {}
        for field in obj_fields:
            value = format_loaded_field_value(field, get_basic_field_value(field, row_data))
            if value is None:
                # The primary key must be 0 (not None) for new records.
                if field.field_name == "id":
                    value = 0
            elif field.data_type == "str" and not isinstance(value, str):
                # Coerce non-string cell values (numbers, dates) into strings.
                value = str(value)
            record[field.field_name] = value

        models.append(cast_dict_as_model(record, obj_type))
    return models
|
|
846
|
+
|
|
847
|
+
|
|
848
|
+
def get_basic_field_value(cur_field: "FieldMakeup", cur_wb_data: dict) -> Any:
    """
    Lookup and return the basic value for the field.

    Prefers the model field name as the key, falls back to the spreadsheet
    column name, and returns "" when neither key is present.

    :param FieldMakeup cur_field: The current field metadata
    :param dict cur_wb_data: The collection of values from the Workbook
    :return: the raw cell value, or "" when the field is absent
    :rtype: Any
    """
    # `key in dict` is the idiomatic form of the original `in d.keys()` checks.
    # (Return annotation corrected: cell values are not necessarily strings.)
    if cur_field.field_name in cur_wb_data:
        return cur_wb_data[cur_field.field_name]
    if cur_field.column_name in cur_wb_data:
        return cur_wb_data[cur_field.column_name]
    return ""
|
|
862
|
+
|
|
863
|
+
|
|
864
|
+
def format_loaded_field_value(cur_field: FieldMakeup, cur_value: Any) -> Any:
    """
    Format the current value based on the field data type

    :param FieldMakeup cur_field: The current field metadata
    :param Any cur_value: The current value of the field
    :return: Any the return value type depends on the field metadata
    :rtype: Any
    """
    # Date-like fields go through the timestamp mapper; everything else only
    # gets NaN/empty-value normalization.
    normalizer = map_pandas_timestamp if cur_field.treat_as_date else check_empty_nan
    return normalizer(cur_value)
|
|
877
|
+
|
|
878
|
+
|
|
879
|
+
def cast_dict_as_model(obj_data: dict, obj_type: str) -> object:
    """
    Instantiate the model matching ``obj_type`` from a dict of workbook data.

    Dispatches to the model class's ``.from_dict()`` constructor; an
    unrecognized type yields ``None``.

    :param dict obj_data: The submitted workbook data in a dict
    :param str obj_type: The model type to instantiate
    :return: object instance of the specified model, populated with the dict
    :rtype: object
    """
    # Map type keys to model classes; instantiation happens only for the match.
    model_classes = {
        "assessment": Assessment,
        "control": Control,
        "issue": Issue,
        "asset": Asset,
        "component": Component,
        "risk": Risk,
        # "task" is intentionally unsupported for now
    }
    model_cls = model_classes.get(obj_type)
    return model_cls.from_dict(obj_data) if model_cls is not None else None
|
|
905
|
+
|
|
906
|
+
|
|
907
|
+
def map_workbook_to_dict(file_path: str, workbook_data: Optional["pd.DataFrame"] = None) -> dict:
    """
    Map a workbook (or an already-loaded frame) to a row-indexed dictionary.

    The frame produced by ``map_workbook_to_lookups`` is transposed so
    ``to_dict()`` yields one entry per row rather than per column.

    :param str file_path: Path to workbook file
    :param Optional[pd.DataFrame] workbook_data: Dataframe to map to dictionary
    :return: dict representation of workbook
    :rtype: dict
    """
    resolved_frame = map_workbook_to_lookups(file_path, workbook_data)
    return resolved_frame.T.to_dict()
|
|
917
|
+
|
|
918
|
+
|
|
919
|
+
def map_workbook_to_lookups(file_path: str, workbook_data: Optional["pd.DataFrame"] = None) -> "pd.DataFrame":
    """
    Resolve the lookup columns of a workbook into their foreign-key id columns.

    For every field with a non-module lookup whose column appears in the data,
    the matching lookup sheet is read from the workbook, its columns renamed to
    the field's column/field names, and left-merged onto the data.

    :param str file_path: Path to workbook file
    :param Optional[pd.DataFrame] workbook_data: Dataframe to map instead of reading the file
    :return: pd.DataFrame representation of workbook with lookup ids merged in
    :rtype: pd.DataFrame
    """
    import pandas as pd  # Optimize import performance

    if workbook_data is not None:
        wb_data = workbook_data
    else:
        wb_data = pd.read_excel(file_path)
    for cur_row in obj_fields:
        if len(cur_row.lookup_field) > 0 and cur_row.lookup_field != "module":
            if cur_row.column_name in wb_data.columns:
                # BUG FIX: fillna() returns a NEW frame; the previous code called
                # it without assigning the result, so NaN cells were never
                # actually replaced before the merge. Assign it back.
                wb_data = wb_data.fillna("None")
                lookup_wb = pd.read_excel(file_path, sheet_name=cur_row.column_name)
                if cur_row.lookup_field == "user":
                    # User lookup sheets carry User/UserId headers
                    lookup_wb = lookup_wb.rename(
                        columns={
                            "User": cur_row.column_name,
                            "UserId": cur_row.field_name,
                        }
                    )
                else:
                    # All other lookup sheets carry name/id headers
                    lookup_wb = lookup_wb.rename(
                        columns={
                            "name": cur_row.column_name,
                            "id": cur_row.field_name,
                        }
                    )
                # Ensure consistent data type on the lookup side's join key.
                # NOTE(review): the left frame's key column is not cast — this
                # assumes the workbook column is already string-typed; confirm.
                lookup_wb[cur_row.column_name] = lookup_wb[cur_row.column_name].astype(str)
                wb_data = wb_data.merge(
                    lookup_wb,
                    how="left",
                    on=cur_row.column_name,
                    validate="many_to_many",
                )
    return wb_data
|
|
963
|
+
|
|
964
|
+
|
|
965
|
+
def lookup_value_in_sheet(file_path: str, lookup_field: str, match_value: str, sheet_name: str) -> Any:
    """
    Look up a display value in a workbook lookup sheet and return its key.

    Scans the named sheet row by row for ``match_value`` in the display column
    and returns the corresponding key column. Sentinels on no match: ``None``
    when the sheet is missing, ``""`` for user lookups, ``0`` otherwise;
    "module" lookups are pass-through.

    :param str file_path: Path to workbook file
    :param str lookup_field: The field being looked up
    :param str match_value: The value to match against the lookup sheet
    :param str sheet_name: The name of the lookup sheet to use
    :return: Any The lookup value
    :rtype: Any
    """
    import pandas as pd  # Optimize import performance

    logger.debug("Looking up value in sheet - Field = {}".format(lookup_field))
    # Module values are stored verbatim — nothing to resolve.
    if lookup_field == "module":
        return match_value
    # Default column pair for name/id lookup sheets.
    match_col = "name"
    val_col = "id"
    if lookup_field == "user":
        # User sheets use User/UserId headers instead of name/id.
        logger.debug("Lookup user field")
        match_col = "User"
        val_col = "UserId"
    try:
        lookup_wb = pd.read_excel(file_path, sheet_name=sheet_name)
    except ValueError:
        # pandas raises ValueError when the sheet does not exist in the file.
        return None
    # Linear scan; first exact match wins.
    for x in lookup_wb.index:
        lookup_val = lookup_wb.at[x, match_col]
        if lookup_val == match_value:
            logger.debug("Found Match!")
            return lookup_wb.at[x, val_col]
    if lookup_field == "user":
        # No matching user: empty string (user ids are strings).
        logger.debug("Lookup user field returning empty string!")
        return ""
    # No match for a numeric foreign key: 0 acts as the "unset" id.
    return 0
|
|
1000
|
+
|
|
1001
|
+
|
|
1002
|
+
def put_data_into_df(obj_list: list) -> "pd.DataFrame":
    """
    Load the passed records into a data frame for export to Excel.

    Only fields with a non-negative sort order are exported; foreign-key
    fields are rendered as their display values via the lookup tables.

    :param list obj_list: a list of records for loading into the data frame
    :return: pd.DataFrame
    :rtype: pd.DataFrame
    """
    import pandas as pd  # Optimize import performance

    visible_fields = [f for f in obj_fields if f.sort_order >= 0]
    headers = [f.column_name for f in visible_fields]
    rows = [[get_field_lookup_value(f, record) for f in visible_fields] for record in obj_list]
    return pd.DataFrame(rows, columns=headers)
|
|
1027
|
+
|
|
1028
|
+
|
|
1029
|
+
def get_field_lookup_value(cur_field: FieldMakeup, cur_obj: dict) -> str:
    """
    Resolve the display value for one field of a record dict.

    Missing fields yield an empty string; fields with a lookup are resolved
    through ``lookup_value``; plain fields are returned as-is.

    :param FieldMakeup cur_field: the current field metadata
    :param dict cur_obj: a dict of the lookup values
    :return: str the value to append
    :rtype: str
    """
    if cur_field.field_name not in cur_obj:
        return ""
    raw_value = cur_obj[cur_field.field_name]
    if len(cur_field.lookup_field) > 0:
        return lookup_value(cur_field.lookup_field, raw_value)
    return raw_value
|
|
1044
|
+
|
|
1045
|
+
|
|
1046
|
+
def lookup_value(lookup_field: str, lookup_value_str: str) -> str:
    """
    Resolve a foreign-key value to its display value via the cached lookup frames.

    Scans the cached data frame for ``lookup_field`` and returns the display
    column of the first row whose key column equals the input. Falls back to
    the raw input when no match is found or the input is ``None``.

    :param str lookup_field: the field name of the lookup field
    :param str lookup_value_str: the foreign key value to look up
    :return: str the display value to present to the user
    :rtype: str
    """
    if lookup_value_str is not None:
        # NOTE(review): assumes get_data_frame() has already populated
        # lookup_dfs[lookup_field] — a missing key raises KeyError; confirm
        # callers always export before importing.
        df = lookup_dfs[lookup_field]
        if lookup_field == "user":
            # User frames are keyed UserId -> User
            lookup_col = "UserId"
            return_col = "User"
        elif lookup_field == "module":
            # Module frames only carry names; key and display are the same column.
            lookup_col = "name"
            return_col = "name"
        else:
            # Generic lookup frames are keyed id -> name
            lookup_col = "id"
            return_col = "name"
        if len(df) > 0:
            # itertuples exposes columns as attributes; works here because the
            # column names above are valid Python identifiers.
            for cur_row in df.itertuples():
                cur_row_dict = getattr(cur_row, lookup_col)
                if lookup_value_str == cur_row_dict:
                    return getattr(cur_row, return_col)
    # No match (or None input): surface the raw value unchanged.
    return lookup_value_str
|
|
1072
|
+
|
|
1073
|
+
|
|
1074
|
+
# pylint: disable=R0912,R0915,R0914
|
|
1075
|
+
def build_workbook(path: str, workbook_filename: str, workbook_title: str) -> Workbook:  # noqa C901
    """
    Create the export Excel workbook with headers and empty lookup sheets.

    Builds the primary sheet with bolded column headers, records each header's
    Excel column letter on the field metadata, creates one empty sheet per
    lookup/enum-as-lookup field, saves, then delegates population of the lookup
    sheets and creation of data validations.

    :param str path: The folder in which to save the created workbook
    :param str workbook_filename: The filename to use for the generated workbook
    :param str workbook_title: The title to use for the primary sheet in the workbook
    :return: Workbook The generated workbook
    :rtype: Workbook
    """
    workbook_sheets = []
    workbook = Workbook()
    worksheet = workbook.active
    worksheet.title = workbook_title
    column_headers = build_header_list()
    # Write bolded headers across row 1 and remember each field's column letter.
    for col, val in enumerate(column_headers, start=1):
        worksheet.cell(row=1, column=col).value = val
        worksheet.cell(row=1, column=col).font = Font(bold=True)
        set_col_for_field(worksheet.cell(row=1, column=col).column_letter, val)

    # create and format reference worksheets for dropdowns
    for cur_field in obj_fields:
        if cur_field.sort_order >= 0:
            if len(cur_field.lookup_field) > 0:
                workbook.create_sheet(title=cur_field.column_name)
                workbook_sheets.append(cur_field.column_name)
            # NOTE(review): a field that is both a lookup and enum-as-lookup
            # would create two sheets with the same title — confirm the two
            # conditions are mutually exclusive upstream.
            if cur_field.treat_enum_as_lookup:
                workbook.create_sheet(title=cur_field.column_name)
                workbook_sheets.append(cur_field.column_name)

    # Persist before handing off: the helpers below reopen the file from disk.
    workbook.save(filename=os.path.join(path, workbook_filename))

    build_workbook_lookup_sheets(path, workbook_filename)

    return build_workbook_data_validations(path, workbook_filename, workbook_sheets)
|
|
1111
|
+
|
|
1112
|
+
|
|
1113
|
+
def build_workbook_lookup_sheets(path: str, workbook_filename: str) -> None:
    """
    Populate the workbook's supporting sheets with lookup data.

    Reopens the saved workbook in append mode (overlaying the empty sheets
    created by ``build_workbook``) and writes one data frame per lookup or
    enum-as-lookup field.

    :param str path: The folder in which to save the created workbook
    :param str workbook_filename: The filename to use for the generated workbook
    :return: None
    :rtype: None
    """
    import pandas as pd  # Optimize import performance

    # Pull in reference data for drop-downs.
    # mode="a" + if_sheet_exists="overlay" writes into the pre-created sheets
    # rather than appending duplicates.
    with pd.ExcelWriter(
        os.path.join(path, workbook_filename),
        mode="a",
        engine="openpyxl",
        if_sheet_exists="overlay",
    ) as writer:
        for cur_field in obj_fields:
            if cur_field.sort_order >= 0:
                if len(cur_field.lookup_field) > 0:
                    # Foreign-key lookups come from the API (users, modules, tables).
                    get_data_frame(cur_field.lookup_field).to_excel(
                        writer,
                        sheet_name=cur_field.column_name,
                        index=False,
                    )
                if cur_field.treat_enum_as_lookup:
                    # Long enum lists are written as a sheet instead of an
                    # inline validation formula (Excel's 256-char limit).
                    create_enum_data_frame(cur_field.enum_values).to_excel(
                        writer,
                        sheet_name=cur_field.column_name,
                        index=False,
                    )
|
|
1145
|
+
|
|
1146
|
+
|
|
1147
|
+
def build_workbook_data_validations(path: str, workbook_filename: str, workbook_sheets: list) -> Workbook:
    """
    Build the data validations for the workbook being generated.

    Reloads the saved workbook, protects the lookup sheets, then classifies
    each exported field into one of: sheet-backed dropdown, inline enum
    dropdown, date validation, or plain editable column — and applies the
    corresponding validations before delegating final styling.

    :param str path: The folder in which to save the created workbook
    :param str workbook_filename: The filename to use for the generated workbook
    :param list workbook_sheets: The list of workbook sheets to create as lookups
    :return: Workbook The generated workbook
    :rtype: Workbook
    """
    workbook = load_workbook(os.path.join(path, workbook_filename))
    worksheet = workbook.active

    # Lock the lookup sheets so users cannot edit reference data.
    for sheet in workbook_sheets:
        workbook[sheet].protection.sheet = True

    # create data validations for enum values
    data_validations_info = []
    date_cols = []  # columns that get a date format + date validation
    edit_cols = []  # plain editable columns (everything else except "id")
    for cur_field in obj_fields:
        if cur_field.sort_order >= 0:
            # Order matters: lookup > inline enum > date > plain editable.
            if len(cur_field.lookup_field) > 0 or cur_field.treat_enum_as_lookup:
                # Dropdown sourced from a dedicated lookup sheet.
                dv_info = {
                    "sheet": cur_field.column_name,
                    "columns": [cur_field.cell_col],
                    "allow_blank": True,
                }
                data_validations_info.append(dv_info)
            elif len(cur_field.enum_values) > 0:
                # Short enum list: inline comma-separated formula.
                val_str = ""
                for cur_enum in cur_field.enum_values:
                    val_str += cur_enum + ", "
                # Trim the trailing ", " separator.
                val_str = val_str[: len(val_str) - 2]
                dv_info = {
                    "formula1": '"' + val_str + '"',
                    "columns": [cur_field.cell_col],
                    "allow_blank": True,
                }
                data_validations_info.append(dv_info)
            elif cur_field.treat_as_date:
                dv_info = {
                    "type": "date",
                    "columns": [cur_field.cell_col],
                    "allow_blank": False,
                }
                data_validations_info.append(dv_info)
                date_cols.append(cur_field.cell_col)
            elif cur_field.field_name != "id":
                # "id" is system-managed and intentionally left out of edit_cols.
                edit_cols.append(cur_field.cell_col)

    create_data_validations(
        data_validations_info=data_validations_info,
        workbook=workbook,
        worksheet=worksheet,
    )
    workbook.save(filename=os.path.join(path, workbook_filename))

    return set_date_style_to_workbook(path, workbook_filename, edit_cols, date_cols)
|
|
1206
|
+
|
|
1207
|
+
|
|
1208
|
+
# pylint: enable=R0912,R0915,R0914
|
|
1209
|
+
def set_date_style_to_workbook(path: str, workbook_filename: str, edit_cols: list, date_cols: list) -> Workbook:
    """
    Apply the date style and header-row freeze to the generated workbook.

    Reloads the workbook, freezes row 1, registers an mm/dd/yyyy named style,
    applies it to every data cell of the date columns, then adjusts column
    widths/styles and saves.

    :param str path: The folder in which to save the created workbook
    :param str workbook_filename: The filename to use for the generated workbook
    :param list edit_cols: a list of columns that should be protected
    :param list date_cols: a list of columns that should be formatted as date
    :return: Workbook The generated workbook
    :rtype: Workbook
    """
    # Freezing top row and adding data style to date columns to assure validation
    workbook = load_workbook(os.path.join(path, workbook_filename))
    worksheet = workbook.active
    worksheet.freeze_panes = "A2"
    date_style = NamedStyle(name="date_style", number_format="mm/dd/yyyy")
    # NOTE(review): add_named_style raises if "date_style" is already registered
    # in this workbook — assumes this runs once per freshly generated file.
    workbook.add_named_style(date_style)

    for col in date_cols:  # Columns to edit
        for cell in worksheet[col]:
            # Skip the header cell in row 1.
            if cell.row > 1:
                cell.style = date_style

    # Adjusting width of columns
    adjust_column_widths_and_styles(worksheet, edit_cols, date_cols, date_style)

    workbook.save(filename=os.path.join(path, workbook_filename))
    return workbook
|
|
1237
|
+
|
|
1238
|
+
|
|
1239
|
+
def get_maximum_rows(*, sheet_object: object) -> int:
    """
    Count the rows of a worksheet that contain at least one non-empty cell.

    Note: this is a count of non-empty rows, not the index of the last
    populated row — the two differ only when blank rows are interspersed
    with data, which does not occur in the generated lookup sheets.

    :param object sheet_object: excel worksheet (iterable of rows of cells)
    :return: int number of rows containing data
    :rtype: int
    """
    # The previous version enumerated rows but never used the index; iterate
    # the rows directly instead.
    return sum(any(cell.value is not None for cell in row) for row in sheet_object)
|
|
1248
|
+
|
|
1249
|
+
|
|
1250
|
+
def get_workbook_title(obj_type: str, prefix: str, postfix: str) -> str:
    """
    Build the name for the workbook being generated.

    The model type is converted to its column-label form and wrapped with the
    supplied prefix and postfix.

    :param str obj_type: The model type to generate a workbook for
    :param str prefix: The prefix to use when creating the new workbook
    :param str postfix: The postfix to use when creating the new workbook
    :return: str The name of the new workbook being generated
    :rtype: str
    """
    return f"{prefix}{convert_property_to_column_label(obj_type)}{postfix}"
|
|
1261
|
+
|
|
1262
|
+
|
|
1263
|
+
def get_all_by_parent(obj_type: str, parent_id: int, parent_module: str) -> list:
    """
    Fetch all records of the given type under a parent, as dictionaries.

    Dispatches to the matching model class's ``get_all_by_parent()``; unknown
    types produce an empty list. Note "control" maps to ControlImplementation.

    :param str obj_type: the type of model to return
    :param int parent_id: the parent id to use to retrieve the records
    :param str parent_module: the parent module to use for retrieving the records
    :return: list of records
    :rtype: list
    """
    model_classes = {
        "assessment": Assessment,
        "control": ControlImplementation,
        "issue": Issue,
        "asset": Asset,
        "component": Component,
        "risk": Risk,
        # "task" is intentionally unsupported for now
    }
    model_cls = model_classes.get(obj_type)
    records = model_cls.get_all_by_parent(parent_id, parent_module) if model_cls is not None else []
    return convert_all_to_dict(records)
|
|
1289
|
+
|
|
1290
|
+
|
|
1291
|
+
def convert_all_to_dict(objs: list) -> list:
    """
    Convert a list of model objects to a list of dictionaries.

    Each object's ``.dict()`` serializer is invoked in order.

    :param list objs: List of objects to convert to dict
    :return: List of dict
    :rtype: list
    """
    return [obj.dict() for obj in objs]
|
|
1304
|
+
|
|
1305
|
+
|
|
1306
|
+
def get_all_by_query(obj: object, parent_id: int, parent_module: str, app: Application) -> list:
    """
    Retrieve records via the model's own export query.

    Thin delegation to the model instance's ``get_export_query()``.

    :param object obj: the instance of the model type specified
    :param int parent_id: the parent id to use to retrieve the records
    :param str parent_module: the parent module to use for retrieving the records
    :param Application app: the application object
    :return: list the collection of records
    :rtype: list
    """
    records = obj.get_export_query(app, parent_id, parent_module)
    return records
|
|
1318
|
+
|
|
1319
|
+
|
|
1320
|
+
def get_obj(obj_type: str) -> object:
    """
    Return an instance of the model type specified by the user.

    For a recognized type the instance's field list is built as a side effect;
    unrecognized types return ``None``.

    :param str obj_type: the type of model to return
    :return: object of the model specified
    :rtype: object
    """
    object_mapping = {
        "assessment": Assessment(),
        "control": ControlImplementation(controlOwnerId="", status="", controlID=0),
        "issue": Issue(),
        "asset": Asset(name="", assetType="", status="", assetCategory=""),
        "component": Component(title="", description="", componentType=ComponentType.ComplianceArtifact),
        "risk": Risk(),
    }
    obj = object_mapping.get(obj_type)
    if obj is None:
        return None
    build_object_field_list(obj)
    return obj
|
|
1341
|
+
|
|
1342
|
+
|
|
1343
|
+
def build_object_field_list(obj: object) -> None:
    """
    Extract the model's field metadata into the module-level field list.

    Walks the model's pydantic fields (minus the excluded set) plus its extra
    fields, builds a ``FieldMakeup`` for each, keeps those with a non-negative
    sort position, and sorts the result by sort order.

    NOTE(review): this appends to the module-level ``obj_fields`` and
    ``include_fields`` lists without clearing them — assumes they are reset (or
    this runs once) per model; confirm against callers.

    :param object obj: This should be a model object that is descended from RegScaleModel
    :return: None
    :rtype: None
    """
    # Build the list of fields for the model type
    pos_dict = obj.get_sort_position_dict()
    field_names = obj.model_fields.keys()
    extra_fields = obj.get_extra_fields()
    include_field_list = obj.get_include_fields()
    for item in include_field_list:
        include_fields.append(item)
    # Regular pydantic-declared fields.
    for cur_field in field_names:
        if cur_field not in exclude_fields:
            field_makeup = FieldMakeup(
                cur_field,
                convert_property_to_column_label(cur_field),
                get_field_data_type(obj.model_fields[cur_field]),
            )
            field_makeup.sort_order = find_sort_pos(cur_field, pos_dict)
            field_makeup.enum_values = obj.get_enum_values(cur_field)
            field_makeup.treat_enum_as_lookup = should_treat_enum_as_lookup(field_makeup.enum_values)
            field_makeup.lookup_field = obj.get_lookup_field(cur_field)
            field_makeup.treat_as_date = obj.is_date_field(cur_field)
            field_makeup.required = is_field_required(obj, cur_field)
            # A negative sort order suppresses the field from the spreadsheet.
            if field_makeup.sort_order >= 0:
                obj_fields.append(field_makeup)
    # Extra (non-pydantic) fields are always treated as strings.
    for cur_field in extra_fields:
        field_makeup = FieldMakeup(cur_field, convert_property_to_column_label(cur_field), "str")
        field_makeup.sort_order = find_sort_pos(cur_field, pos_dict)
        if field_makeup.sort_order >= 0:
            obj_fields.append(field_makeup)
    obj_fields.sort(key=attrgetter("sort_order"))
|
|
1380
|
+
|
|
1381
|
+
|
|
1382
|
+
def should_treat_enum_as_lookup(enum_values: list) -> bool:
    """
    Decide whether an enum list is too long for an inline Excel validation.

    Excel inline list formulas are limited in length, so when the
    comma-separated rendering of the values exceeds 256 characters the field
    must be backed by a lookup sheet instead.

    :param list enum_values: the list of enum values for this field
    :return: bool indicating if the list is too long to treat as an enum
    :rtype: bool
    """
    return len(", ".join(enum_values)) > 256
|
|
1399
|
+
|
|
1400
|
+
|
|
1401
|
+
def get_field_data_type(field_info: FieldInfo) -> str:
    """
    Derive the field's data-type label from its annotation.

    Checks the annotation against known concrete and Optional forms in order;
    anything unrecognized is labeled "enum".

    :param FieldInfo field_info: The field annotation taken from the model object
    :return: str the data type to use for the field
    :rtype: str
    """
    annotation = field_info.annotation
    if annotation == dict:
        return "dict"
    label_candidates = (
        ("int", (int, Union[int, None])),
        ("bool", (bool, Union[bool, None])),
        ("str", (str, Union[str, None], Union[str, int, None])),
        ("float", (Union[float, None],)),
    )
    for label, candidates in label_candidates:
        if annotation in candidates:
            return label
    return "enum"
|
|
1420
|
+
|
|
1421
|
+
|
|
1422
|
+
def is_field_required(obj: object, field_name: str) -> bool:
    """
    Determine whether a model field is required, from its annotation.

    A bare concrete annotation (dict/int/bool/str) means required; a Union
    (typically Optional) or anything else defers to the model's own
    ``is_required_field`` check.

    :param object obj: The object to check to determine if the field is required
    :param str field_name: The field name to be checked
    :return: bool indicating if the field is required
    :rtype: bool
    """
    annotation = obj.model_fields[field_name].annotation
    if annotation in (dict, int, bool, str):
        return True
    return obj.is_required_field(field_name)
|
|
1443
|
+
|
|
1444
|
+
|
|
1445
|
+
def convert_property_to_column_label(field_name: str) -> str:
    """
    Turn a model property name into a spreadsheet column header label.

    Strips a trailing "Id" (foreign-key convention) and capitalizes the first
    letter, leaving the rest of the name untouched.

    :param str field_name: The property name as read from the model
    :return: str a formatted column header label
    :rtype: str
    """
    label = field_name
    if label.endswith("Id"):
        label = label[:-2]
    return label[:1].upper() + label[1:]
|
|
1461
|
+
|
|
1462
|
+
|
|
1463
|
+
def set_col_for_field(col: str, col_name: str) -> None:
    """
    Record the Excel column letter(s) on the matching field metadata entry.

    Every field whose column header matches ``col_name`` gets its
    ``cell_col`` set to the supplied letter(s).

    :param str col: The letter(s) column
    :param str col_name: The column name
    :return: None
    :rtype: None
    """
    for field_entry in obj_fields:
        if field_entry.column_name == col_name:
            field_entry.cell_col = col
|
|
1476
|
+
|
|
1477
|
+
|
|
1478
|
+
def find_sort_pos(field_name: str, pos_dict: dict) -> int:
    """
    Return the sort position of a field from the position dictionary.

    A return of -1 suppresses the field, leaving it out of the generated
    spreadsheet.

    :param str field_name: The property name to find the sort position of
    :param dict pos_dict: a dict of field names and positions
    :return: int
    :rtype: int
    """
    return pos_dict.get(field_name, -1)
|
|
1493
|
+
|
|
1494
|
+
|
|
1495
|
+
def build_header_list() -> list:
    """
    Build the ordered list of column headers from the field metadata.

    Fields with a negative sort order are suppressed; each kept header is
    debug-logged with its position.

    :return: list of str
    :rtype: list
    """
    headers = []
    for field_entry in obj_fields:
        if field_entry.sort_order < 0:
            continue
        logger.debug(field_entry.column_name + " - " + str(field_entry.sort_order))
        headers.append(field_entry.column_name)
    return headers
|
|
1508
|
+
|
|
1509
|
+
|
|
1510
|
+
def get_data_frame(field_name: str) -> "pd.DataFrame":
    """
    Fetch the lookup data frame for a foreign-key field and cache it.

    "user" and "module" have dedicated providers; any other field is resolved
    through the generic GraphQL name lookup. The frame is memoized in
    ``lookup_dfs`` under the field name.

    :param str field_name: the field name of the foreign key data
    :return: pd.DataFrame the populated data frame
    :rtype: pd.DataFrame
    """
    if field_name == "user":
        frame = get_user_names()
    elif field_name == "module":
        frame = get_module_list()
    else:
        frame = get_field_names(field_name)
    lookup_dfs[field_name] = frame
    return frame
|
|
1530
|
+
|
|
1531
|
+
|
|
1532
|
+
def create_enum_data_frame(enum_list: list) -> "pd.DataFrame":
    """
    Wrap a list of enum values in a single-column data frame.

    The column is named "name" to match the generic lookup-sheet layout.

    :param list enum_list: The list of enum values
    :return: pd.DataFrame the populated data frame
    :rtype: pd.DataFrame
    """
    import pandas as pd  # Optimize import performance

    return pd.DataFrame({"name": list(enum_list)})
|
|
1543
|
+
|
|
1544
|
+
|
|
1545
|
+
def get_facility_list() -> "pd.DataFrame":
    """
    Return the available facilities as a name/id data frame.

    :return: pd.DataFrame the populated data frame
    :rtype: pd.DataFrame
    """
    import pandas as pd  # Optimize import performance

    facilities = Facility.get_list()
    rows = [[entry["name"], entry["id"]] for entry in facilities]
    return pd.DataFrame(rows, index=None, columns=["name", "id"])
|
|
1559
|
+
|
|
1560
|
+
|
|
1561
|
+
def get_module_list() -> "pd.DataFrame":
    """
    Return the available module API names as a single-column data frame.

    :return: pd.DataFrame the populated data frame
    :rtype: pd.DataFrame
    """
    import pandas as pd  # Optimize import performance

    module_names = Modules().api_names()
    return pd.DataFrame(module_names, columns=["name"])
|
|
1572
|
+
|
|
1573
|
+
|
|
1574
|
+
def get_field_names(field_name: str) -> "pd.DataFrame":
    """
    Retrieve all name/id pairs of a given parent table via GraphQL.

    The query root is produced by textual substitution of ``field_name`` into
    the template, then the items of the response are flattened into a
    two-column data frame.

    :param str field_name: the foreign key table to retrieve
    :return: pandas dataframe with name/id pairs
    :rtype: pd.DataFrame
    """
    import pandas as pd  # Optimize import performance

    api = Api()

    # Placeholder substitution rather than f-string formatting, because the
    # GraphQL body is full of literal braces. "field_name" occurs exactly once
    # in the template.
    # NOTE(review): take: 50 caps the result — tables with more than 50 rows
    # are truncated and pageInfo.hasNextPage is never consulted; confirm.
    body = """
    query {
        field_name(skip: 0, take: 50, order: {name: ASC}, ) {
          items {
            name
            id
          }
          totalCount
          pageInfo {
            hasNextPage
          }
        }
    }
    """.replace(
        "field_name", field_name
    )

    field_items = api.graph(query=body)
    # The response is keyed by the query root (i.e. the table name).
    names = field_items[str(field_name)]["items"]
    field_names = [[i["name"], i["id"]] for i in names]
    all_names = pd.DataFrame(field_names, index=None, columns=["name", "id"])

    return all_names
|
|
1609
|
+
|
|
1610
|
+
|
|
1611
|
+
def create_data_validations(data_validations_info: list, workbook: Workbook, worksheet: Worksheet) -> None:
    """
    Create data validations for an excel worksheet.

    :param list data_validations_info: List containing dictionaries with
        information for data validations (keys: type, formula1, sheet,
        allow_blank, columns)
    :param Workbook workbook: Workbook object used to resolve list-source sheets
    :param Worksheet worksheet: The worksheet object to add data validations to
    :return: None
    :rtype: None
    """
    # Plain iteration: the enumerate() index in the original was never used.
    for dv_info in data_validations_info:
        dv_type = dv_info.get("type", "list")  # hoisted: was looked up three times
        is_list = dv_type == "list"

        formula1 = dv_info.get("formula1")
        if sheet_name := dv_info.get("sheet"):
            # Point the dropdown at column A of the lookup sheet, sized to its data.
            max_rows = get_maximum_rows(sheet_object=workbook[sheet_name])
            formula1 = f"={sheet_name}!$A$2:$A${max_rows}"

        data_validation = DataValidation(
            type=dv_type,
            formula1=formula1,
            allow_blank=dv_info.get("allow_blank", True),
            # openpyxl quirk: showDropDown=False means the in-cell dropdown IS shown
            showDropDown=False,
            error=SELECTION_ERROR if is_list else INVALID_ENTRY_ERROR,
            errorTitle=INVALID_ENTRY_TITLE,
            prompt=SELECT_PROMPT if is_list else DATE_ENTRY_PROMPT,
            # NOTE(review): get("type", "date") is truthy for any non-empty type,
            # so these are effectively always True — confirm intent.
            showErrorMessage=True if dv_info.get("type", "date") else None,
            showInputMessage=True if dv_info.get("type", "date") else None,
        )

        worksheet.add_data_validation(data_validation)
        # Apply from row 2 down to Excel's maximum row for each configured column.
        for column in dv_info["columns"]:
            data_validation.add(f"{column}2:{column}1048576")
|
|
1642
|
+
|
|
1643
|
+
|
|
1644
|
+
def adjust_column_widths_and_styles(
    worksheet: Worksheet,
    editable_columns: Optional[list[str]] = None,
    date_columns: Optional[list[str]] = None,
    date_col_style: Optional[NamedStyle] = None,
) -> None:
    """
    Size every column to fit its longest value and apply per-cell styling:
    all cells are locked by default, cells below the header in editable
    columns are unlocked, and date columns below the header receive the
    supplied named style.

    :param Worksheet worksheet: Worksheet to adjust column widths for
    :param Optional[list[str]] editable_columns: Column letters to unlock for editing
    :param Optional[list[str]] date_columns: Column letters to apply the date style to
    :param Optional[NamedStyle] date_col_style: NamedStyle object to apply to date columns, defaults to None
    :return: None
    :rtype: None
    """
    unlocked = editable_columns if editable_columns else []
    dated = date_columns if date_columns else []

    for column_cells in worksheet.columns:
        letter = column_cells[0].column_letter
        widest = 0

        for cell in column_cells:
            # Track the longest rendered value in this column.
            widest = max(widest, len(str(cell.value)))

            # Lock everything by default...
            cell.protection = Protection(locked=True)
            # ...then unlock data rows (below the header) of editable columns.
            if cell.row > 1 and letter in unlocked:
                cell.protection = Protection(locked=False)

            # Date styling only applies below the header row.
            if cell.row > 1 and letter in dated and date_col_style:
                cell.style = date_col_style

        # Pad and scale the width so content is not cramped.
        worksheet.column_dimensions[letter].width = (widest + 2) * 1.2
|
|
1684
|
+
|
|
1685
|
+
|
|
1686
|
+
def match_fields_to_data(model_data: list) -> None:
    """
    This method is to iterate through the list of model fields, and eliminate any that are not
    included in the data returned from RegScale. This is for those models that are using a
    graphQL query to return the data, which may not include all the fields on the model.

    :param list model_data: the data returned from the query
    :return: None
    :rtype: None
    """
    # NOTE(review): obj_fields and include_fields are free variables resolved from an
    # enclosing scope (not parameters) — confirm they are bound before this is called.
    for cur_rec in model_data:
        cur_rec_keys = cur_rec.keys()
        for cur_field in obj_fields:
            # A field absent from the query result, and not explicitly kept via
            # include_fields, gets sort_order -1 — presumably a sentinel that hides
            # it from downstream consumers; verify against the caller.
            if cur_field.field_name not in cur_rec_keys:
                if cur_field.field_name not in include_fields:
                    cur_field.sort_order = -1
|