bitwarden_workflow_linter-0.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,173 @@
+ """Module providing Lint subcommand to run custom linting rules against GitHub Action
+ Workflows."""
+
+ import argparse
+ import os
+
+ from functools import reduce
+ from typing import Optional
+
+ from .load import WorkflowBuilder, Rules
+ from .utils import LintFinding, Settings
+
+
+ class LinterCmd:
+     """Command to lint GitHub Action Workflow files.
+
+     This class contains logic to lint workflows that are passed in.
+     Supporting logic is supplied to:
+       - build out the list of Rules desired
+       - select and validate the workflow files to lint
+     """
+
+     def __init__(self, settings: Optional[Settings] = None) -> None:
+         """Initialize the LinterCmd class.
+
+         Args:
+           settings:
+             A Settings object that contains any default, overridden, or custom settings
+             required anywhere in the application.
+         """
+         self.rules = Rules(settings=settings)
+
+     @staticmethod
+     def extend_parser(
+         subparsers: argparse._SubParsersAction,
+     ) -> argparse._SubParsersAction:
+         """Extends the CLI subparser with the options for LintCmd.
+
+         Adds 'lint' as a subcommand along with its options and arguments.
+
+         Args:
+           subparsers:
+             The main argument parser to add subcommands and arguments to
+         """
+         parser_lint = subparsers.add_parser(
+             "lint",
+             help="Verify that a GitHub Action Workflow follows all of the Rules.",
+         )
+         parser_lint.add_argument(
+             "-s",
+             "--strict",
+             action="store_true",
+             help="return non-zero exit code on warnings as well as errors",
+         )
+         parser_lint.add_argument("-f", "--files", action="append", help="files to lint")
+         parser_lint.add_argument(
+             "--output",
+             action="store",
+             help="output format: [stdout|json|md]",
+             default="stdout",
+         )
+         return subparsers
+
+     def get_max_error_level(self, findings: list[LintFinding]) -> int:
+         """Get max error level from list of findings.
+
+         Compute the maximum error level to determine the exit code required:
+         if the maximum level is an error, exit(1); otherwise exit(0).
+
+         Args:
+           findings:
+             All of the findings that the linter found while linting a workflow.
+
+         Returns:
+           The numeric value of the maximum lint finding
+         """
+         if len(findings) == 0:
+             return 0
+         return max(findings, key=lambda finding: finding.level.code).level.code
+
+     def lint_file(self, filename: str) -> int:
+         """Lint a single workflow.
+
+         Run all of the Workflow, Job, and Step level rules that have been enabled.
+
+         Args:
+           filename:
+             The name of the file that contains the workflow to lint
+
+         Returns:
+           The maximum error level found in the file (none, warning, error) to
+           calculate the exit code from.
+         """
+         findings = []
+         max_error_level = 0
+
+         print(f"Linting: {filename}")
+         workflow = WorkflowBuilder.build(filename)
+
+         for rule in self.rules.workflow:
+             findings.append(rule.execute(workflow))
+
+         for _, job in workflow.jobs.items():
+             for rule in self.rules.job:
+                 findings.append(rule.execute(job))
+
+             if job.steps is not None:
+                 for step in job.steps:
+                     for rule in self.rules.step:
+                         findings.append(rule.execute(step))
+
+         findings = list(filter(lambda a: a is not None, findings))
+
+         if len(findings) > 0:
+             for finding in findings:
+                 print(f" - {finding}")
+             print()
+
+             max_error_level = self.get_max_error_level(findings)
+
+         return max_error_level
+
+     def generate_files(self, files: list[str]) -> list[str]:
+         """Generate the list of files to lint.
+
+         Searches the list of directories and/or files taken from the CLI.
+
+         Args:
+           files:
+             list of file names or directory names.
+
+         Returns:
+           A sorted set of all workflow files in the path(s) specified.
+         """
+         workflow_files = []
+         for path in files:
+             if os.path.isfile(path):
+                 workflow_files.append(path)
+             elif os.path.isdir(path):
+                 for subdir, _, files in os.walk(path):
+                     for filename in files:
+                         filepath = subdir + os.sep + filename
+                         if filepath.endswith((".yml", ".yaml")):
+                             workflow_files.append(filepath)
+
+         return sorted(set(workflow_files))
+
+     def run(self, input_files: list[str], strict: bool = False) -> int:
+         """Execute the LinterCmd.
+
+         Args:
+           input_files:
+             list of file names or directory names.
+           strict:
+             fail on WARNING instead of succeeding
+
+         Returns:
+           The return_code for the entire CLI to indicate success/failure
+         """
+         files = self.generate_files(input_files)
+
+         if len(files) > 0:
+             return_code = reduce(
+                 lambda a, b: a if a > b else b, map(self.lint_file, files)
+             )
+
+             if return_code == 1 and not strict:
+                 return_code = 0
+
+             return return_code
+         else:
+             print(f'File(s)/Directory: "{input_files}" does not exist, exiting.')
+             return -1
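
A minimal usage sketch of the class above (editorial illustration, not part of the package contents). It assumes the package installs as bitwarden_workflow_linter and that Settings can be constructed with its defaults; both are assumptions not confirmed by this listing.

# Hypothetical driver for LinterCmd; import paths and the Settings() call are assumptions.
from bitwarden_workflow_linter.lint import LinterCmd
from bitwarden_workflow_linter.utils import Settings

linter = LinterCmd(settings=Settings())
# run() expands directories into .yml/.yaml files, lints each one, and reduces the
# per-file results to a single exit code (warnings are forgiven unless strict=True).
exit_code = linter.run([".github/workflows"], strict=True)
print(f"workflow linter exit code: {exit_code}")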
@@ -0,0 +1,146 @@
+ """Module to load Workflows and Rules."""
+
+ import importlib
+
+ from typing import List, Optional
+
+ from ruamel.yaml import YAML
+ from ruamel.yaml.comments import CommentedMap
+
+ from .models.job import Job
+ from .models.step import Step
+ from .models.workflow import Workflow
+ from .rule import Rule
+ from .utils import Settings
+
+
+ yaml = YAML()
+
+
+ class WorkflowBuilderError(Exception):
+     """Exception to indicate an error with the WorkflowBuilder."""
+
+     pass
+
+
+ class WorkflowBuilder:
+     """Collection of methods to build Workflow objects."""
+
+     @classmethod
+     def __load_workflow_from_file(cls, filename: str) -> CommentedMap:
+         """Load YAML from disk.
+
+         Args:
+           filename:
+             The name of the YAML file to read.
+
+         Returns:
+           A CommentedMap that contains the dict() representation of the
+           YAML file. It includes the comments as a part of their respective
+           objects (depending on their location in the file).
+         """
+         with open(filename, encoding="utf8") as file:
+             return yaml.load(file)
+
+     @classmethod
+     def __build_workflow(cls, loaded_yaml: CommentedMap) -> Workflow:
+         """Parse the YAML and build out the workflow to run Rules against.
+
+         Args:
+           loaded_yaml:
+             YAML that was loaded from either code or a file
+
+         Returns:
+           A Workflow to run linting Rules against
+         """
+         return Workflow.init("", loaded_yaml)
+
+     @classmethod
+     def build(
+         cls,
+         filename: Optional[str] = None,
+         workflow: Optional[CommentedMap] = None,
+         from_file: bool = True,
+     ) -> Workflow:
+         """Build a Workflow from either code or a file.
+
+         This is a method that assists in testing by abstracting the disk IO
+         and allows for passing in a YAML object in code.
+
+         Args:
+           filename:
+             The name of the file to load the YAML workflow from
+           workflow:
+             Pre-loaded YAML of a workflow
+           from_file:
+             Flag to determine if the YAML has already been loaded or needs to
+             be loaded from disk
+         """
+         if from_file and filename is not None:
+             return cls.__build_workflow(cls.__load_workflow_from_file(filename))
+         elif not from_file and workflow is not None:
+             return cls.__build_workflow(workflow)
+
+         raise WorkflowBuilderError(
+             "The workflow must either be built from a file or from a CommentedMap"
+         )
+
+
+ class LoadRulesError(Exception):
+     """Exception to indicate an error with loading rules."""
+
+     pass
+
+
+ class Rules:
+     """A collection of all of the types of rules.
+
+     Rules is used as a collection of which Rules apply to which parts of the
+     workflow. It also assists in making sure the Rules that apply to multiple
+     types are not skipped.
+     """
+
+     workflow: List[Rule] = []
+     job: List[Rule] = []
+     step: List[Rule] = []
+
+     def __init__(self, settings: Settings) -> None:
+         """Initializes the Rules.
+
+         Args:
+           settings:
+             A Settings object that contains any default, overridden, or custom settings
+             required anywhere in the application.
+         """
+         # [TODO]: data resiliency
+         for rule in settings.enabled_rules:
+             module_name = rule.split(".")
+             module_name = ".".join(module_name[:-1])
+             rule_name = rule.split(".")[-1]
+
+             try:
+                 rule_class = getattr(importlib.import_module(module_name), rule_name)
+                 rule_inst = rule_class(settings=settings)
+
+                 if Workflow in rule_inst.compatibility:
+                     self.workflow.append(rule_inst)
+                 if Job in rule_inst.compatibility:
+                     self.job.append(rule_inst)
+                 if Step in rule_inst.compatibility:
+                     self.step.append(rule_inst)
+             except LoadRulesError as err:
+                 print(f"Error loading: {rule}\n{err}")
+
+     def list(self) -> None:
+         """Print the loaded Rules."""
+         print("===== Loaded Rules =====")
+         print("workflow rules:")
+         for rule in self.workflow:
+             print(f" - {type(rule).__name__}")
+         print("job rules:")
+         for rule in self.job:
+             print(f" - {type(rule).__name__}")
+         print("step rules:")
+         for rule in self.step:
+             print(f" - {type(rule).__name__}")
+         print("========================\n")
File without changes
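
WorkflowBuilder.build above accepts either a filename or a pre-loaded CommentedMap, which is what lets tests exercise the Rules without disk IO. A short sketch of the in-memory path follows; the import path and the workflow YAML are illustrative assumptions.

# Build a Workflow from YAML held in code rather than on disk (illustrative sketch).
from ruamel.yaml import YAML
from bitwarden_workflow_linter.load import WorkflowBuilder

loaded = YAML().load(
    """\
name: CI
on:
  push:

jobs:
  test:
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
"""
)

workflow = WorkflowBuilder.build(workflow=loaded, from_file=False)
print(workflow.name)               # "CI"
print(list(workflow.jobs.keys()))  # ["test"]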
@@ -0,0 +1,56 @@
+ """Representation for a job in a GitHub Action workflow."""
+
+ from dataclasses import dataclass, field
+ from typing import List, Optional, Self
+
+ from dataclasses_json import config, dataclass_json, Undefined
+ from ruamel.yaml.comments import CommentedMap
+
+ from .step import Step
+
+
+ @dataclass_json(undefined=Undefined.EXCLUDE)
+ @dataclass
+ class Job:
+     """Represents a job in a GitHub Action workflow.
+
+     This object contains all of the data that is required to run the current linting
+     Rules against. If a new Rule requires a key that is missing, the attribute should
+     be added to this class to make it available for use in linting.
+     """
+
+     runs_on: Optional[str] = field(metadata=config(field_name="runs-on"), default=None)
+     key: Optional[str] = None
+     name: Optional[str] = None
+     env: Optional[CommentedMap] = None
+     steps: Optional[List[Step]] = None
+     uses: Optional[str] = None
+     uses_path: Optional[str] = None
+     uses_ref: Optional[str] = None
+     uses_with: Optional[CommentedMap] = field(
+         metadata=config(field_name="with"), default=None
+     )
+
+     @classmethod
+     def init(cls: Self, key: str, data: CommentedMap) -> Self:
+         """Custom dataclass constructor to map job data to a Job."""
+         init_data = {
+             "key": key,
+             "name": data["name"] if "name" in data else None,
+             "runs-on": data["runs-on"] if "runs-on" in data else None,
+             "env": data["env"] if "env" in data else None,
+         }
+
+         new_job = cls.from_dict(init_data)
+
+         if "steps" in data:
+             new_job.steps = [
+                 Step.init(idx, new_job.key, step_data)
+                 for idx, step_data in enumerate(data["steps"])
+             ]
+         else:
+             new_job.uses = data["uses"].replace("\n", "")
+             if "@" in new_job.uses:
+                 new_job.uses_path, new_job.uses_ref = new_job.uses.split("@")
+
+         return new_job
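
The Job model above remaps hyphenated workflow keys ('runs-on', 'with') onto Python-friendly attribute names and, for reusable-workflow calls, splits the 'uses' reference. A small sketch, with an illustrative job definition and an assumed import path:

# Illustrative Job.init call; the YAML content and import path are assumptions.
from ruamel.yaml import YAML
from bitwarden_workflow_linter.models.job import Job

job_data = YAML().load(
    """\
name: Call a reusable workflow
uses: bitwarden/gh-actions/.github/workflows/example.yml@main
"""
)

job = Job.init("call-workflow", job_data)
print(job.key)        # "call-workflow"
print(job.uses_path)  # "bitwarden/gh-actions/.github/workflows/example.yml"
print(job.uses_ref)   # "main"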
@@ -0,0 +1,48 @@
+ """Representation for a job step in a GitHub Action workflow."""
+
+ from dataclasses import dataclass, field
+ from typing import Optional, Self
+
+ from dataclasses_json import config, dataclass_json, Undefined
+ from ruamel.yaml.comments import CommentedMap
+
+
+ @dataclass_json(undefined=Undefined.EXCLUDE)
+ @dataclass
+ class Step:
+     """Represents a step in a GitHub Action workflow job.
+
+     This object contains all of the data that is required to run the current linting
+     Rules against. If a new Rule requires a key that is missing, the attribute should
+     be added to this class to make it available for use in linting.
+     """
+
+     key: Optional[int] = None
+     job: Optional[str] = None
+     name: Optional[str] = None
+     env: Optional[CommentedMap] = None
+     uses: Optional[str] = None
+     uses_path: Optional[str] = None
+     uses_ref: Optional[str] = None
+     uses_comment: Optional[str] = None
+     uses_version: Optional[str] = None
+     uses_with: Optional[CommentedMap] = field(
+         metadata=config(field_name="with"), default=None
+     )
+     run: Optional[str] = None
+
+     @classmethod
+     def init(cls: Self, idx: int, job: str, data: CommentedMap) -> Self:
+         """Custom dataclass constructor to map job step data to a Step."""
+         new_step = cls.from_dict(data)
+
+         new_step.key = idx
+         new_step.job = job
+
+         if "uses" in data.ca.items and data.ca.items["uses"][2]:
+             new_step.uses_comment = data.ca.items["uses"][2].value.replace("\n", "")
+             if "@" in new_step.uses:
+                 new_step.uses_path, new_step.uses_ref = new_step.uses.split("@")
+                 new_step.uses_version = new_step.uses_comment.split(" ")[-1]
+
+         return new_step
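
The Step model above leans on ruamel's comment metadata (data.ca.items) to recover the human-readable version that trails a pinned 'uses:' line. A sketch of that behavior, with an assumed import path and an illustrative step definition:

# Illustrative Step.init call; import path, action ref, and comment are assumptions.
from ruamel.yaml import YAML
from bitwarden_workflow_linter.models.step import Step

steps = YAML().load(
    """\
steps:
  - name: Checkout repo
    uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11  # v4.1.1
"""
)

step = Step.init(0, "build", steps["steps"][0])
print(step.uses_path)     # "actions/checkout"
print(step.uses_ref)      # "b4ffde65f46336ab88eb53be808477a3936bae11"
print(step.uses_comment)  # "# v4.1.1"
print(step.uses_version)  # "v4.1.1"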
@@ -0,0 +1,45 @@
+ """Representation for an entire GitHub Action workflow."""
+
+ from dataclasses import dataclass
+ from typing import Dict, Optional, Self
+
+ from dataclasses_json import dataclass_json, Undefined
+ from ruamel.yaml.comments import CommentedMap
+
+ from .job import Job
+
+
+ @dataclass_json(undefined=Undefined.EXCLUDE)
+ @dataclass
+ class Workflow:
+     """Represents an entire GitHub Action workflow.
+
+     This object contains all of the data that is required to run the current linting
+     Rules against. If a new Rule requires a key that is missing, the attribute should
+     be added to this class to make it available for use in linting.
+
+     See src/models/job.py for an example where a key in the workflow data does not map
+     one-to-one to the model (ex. 'with' => 'uses_with').
+     """
+
+     key: str = ""
+     name: Optional[str] = None
+     on: Optional[CommentedMap] = None
+     jobs: Optional[Dict[str, Job]] = None
+
+     @classmethod
+     def init(cls: Self, key: str, data: CommentedMap) -> Self:
+         init_data = {
+             "key": key,
+             "name": data["name"] if "name" in data else None,
+             "on": data["on"] if "on" in data else None,
+         }
+
+         new_workflow = cls.from_dict(init_data)
+
+         new_workflow.jobs = {
+             str(job_key): Job.init(job_key, job)
+             for job_key, job in data["jobs"].items()
+         }
+
+         return new_workflow
@@ -0,0 +1,101 @@
+ """Base Rule class to build rules by extending."""
+
+ from typing import List, Optional, Tuple, Union
+
+ from .models.workflow import Workflow
+ from .models.job import Job
+ from .models.step import Step
+ from .utils import LintFinding, LintLevels, Settings
+
+
+ class RuleExecutionException(Exception):
+     """Exception for the Base Rule class."""
+
+     pass
+
+
+ class Rule:
+     """Base class of a Rule to extend to create a linting Rule."""
+
+     on_fail: LintLevels = LintLevels.ERROR
+     compatibility: List[Union[Workflow, Job, Step]] = [Workflow, Job, Step]
+     settings: Optional[Settings] = None
+
+     def fn(self, obj: Union[Workflow, Job, Step]) -> Tuple[bool, str]:
+         """Execute the Rule (this should be overridden in the extending class).
+
+         Args:
+           obj:
+             The object that the Rule is to be run against
+
+         Returns:
+           The success/failure result of the Rule run against the input.
+         """
+         return False, f"{obj.name}: <default fail message>"
+
+     def build_lint_message(self, message: str, obj: Union[Workflow, Job, Step]) -> str:
+         """Build the lint failure message.
+
+         Build the lint failure message depending on the type of object that the
+         Rule is being run against.
+
+         Args:
+           message:
+             The message body of the failure
+           obj:
+             The object the Rule is being run against
+
+         Returns:
+           The type specific failure message
+         """
+         obj_type = type(obj)
+
+         if obj_type == Step:
+             return f"{obj_type.__name__} [{obj.job}.{obj.key}] => {message}"
+         elif obj_type == Job:
+             return f"{obj_type.__name__} [{obj.key}] => {message}"
+         else:
+             return f"{obj_type.__name__} => {message}"
+
+     def execute(self, obj: Union[Workflow, Job, Step]) -> Union[LintFinding, None]:
+         """Wrapper function to execute the overridden self.fn().
+
+         Run the Rule against the object and return the results. The result
+         could be an Exception message where the Rule cannot be run against
+         the object for whatever reason. If an exception doesn't occur, the
+         result is linting success or failure.
+
+         Args:
+           obj:
+             The object the Rule is being run against
+
+         Returns:
+           A LintFinding object that contains the message to print to the user
+           and a LintLevel that contains the level of error to calculate the
+           exit code with.
+         """
+         message = None
+
+         if type(obj) not in self.compatibility:
+             return LintFinding(
+                 self.build_lint_message(
+                     f"{type(obj).__name__} not compatible with {type(self).__name__}",
+                     obj,
+                 ),
+                 LintLevels.ERROR,
+             )
+
+         try:
+             passed, message = self.fn(obj)
+
+             if passed:
+                 return None
+         except RuleExecutionException as err:
+             return LintFinding(
+                 self.build_lint_message(
+                     f"failed to apply {type(self).__name__}\n{err}", obj
+                 ),
+                 LintLevels.ERROR,
+             )
+
+         return LintFinding(self.build_lint_message(message, obj), self.on_fail)
File without changes
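
The Rule base class above is designed to be subclassed: a concrete rule sets on_fail, compatibility, and settings, and overrides fn() to return a (passed, message) tuple that execute() turns into a LintFinding. A sketch of a hypothetical rule built on that contract (the rule itself is not part of the package, and the import paths assume the package name bitwarden_workflow_linter):

# Hypothetical custom rule; only the base-class contract shown above is relied on.
from typing import Optional, Tuple

from bitwarden_workflow_linter.models.job import Job
from bitwarden_workflow_linter.rule import Rule
from bitwarden_workflow_linter.utils import LintLevels, Settings


class RuleJobRunsOnExists(Rule):
    """Hypothetical rule: every Job must set 'runs-on' or call a reusable workflow."""

    def __init__(self, settings: Optional[Settings] = None) -> None:
        self.message = "job must declare 'runs-on' or 'uses'"
        self.on_fail = LintLevels.ERROR
        self.compatibility = [Job]  # Rules() buckets this rule under job-level checks
        self.settings = settings

    def fn(self, obj: Job) -> Tuple[bool, str]:
        # Return (passed, message); execute() wraps a failure into a LintFinding.
        if obj.runs_on is not None or obj.uses is not None:
            return True, ""
        return False, self.message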
@@ -0,0 +1,72 @@
+ """A Rule to enforce prefixes on Job environment variables."""
+
+ from typing import Optional, Tuple
+
+ from ..models.job import Job
+ from ..rule import Rule
+ from ..utils import LintLevels, Settings
+
+
+ class RuleJobEnvironmentPrefix(Rule):
+     """Rule to enforce specific prefixes for environment variables.
+
+     Automated testing is not easily written for GitHub Action Workflows. CI can also
+     get complicated really quickly and take up hundreds of lines. All of this can
+     make it very difficult to debug and troubleshoot, especially when environment
+     variables can be set in four different places: Workflow level, Job level, Step
+     level, and inside a shell Step.
+
+     To alleviate some of the pain, we have decided that all Job level environment
+     variables should be prefixed with an underscore. All Workflow environment
+     variables are normally at the top of the file and Step level ones are pretty
+     visible when debugging a shell Step.
+     """
+
+     def __init__(self, settings: Optional[Settings] = None) -> None:
+         """RuleJobEnvironmentPrefix constructor to override the Rule class.
+
+         Args:
+           settings:
+             A Settings object that contains any default, overridden, or custom settings
+             required anywhere in the application.
+         """
+         self.message = "Job environment vars should start with an underscore:"
+         self.on_fail = LintLevels.ERROR
+         self.compatibility = [Job]
+         self.settings = settings
+
+     def fn(self, obj: Job) -> Tuple[bool, str]:
+         """Enforces the underscore prefix standard on job envs.
+
+         Example:
+         ---
+         on:
+           workflow_dispatch:
+
+         jobs:
+           job-key:
+             runs-on: ubuntu-22.04
+             env:
+               _TEST_ENV: "test"
+             steps:
+               - run: echo test
+
+         All keys under jobs.job-key.env should be prefixed with an underscore
+         as in _TEST_ENV.
+
+         See tests/rules/test_job_environment_prefix.py for examples of
+         incorrectly named environment variables.
+         """
+         correct = True
+
+         if obj.env:
+             offending_keys = []
+             for key in obj.env.keys():
+                 if key[0] != "_":
+                     offending_keys.append(key)
+                     correct = False
+
+         if correct:
+             return True, ""
+
+         return False, f"{self.message} ({', '.join(offending_keys)})"
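
To see what this rule flags, it can be exercised directly against a Job (illustrative sketch; the rules module path is an assumption, since the listing does not show file names):

# Illustrative run of RuleJobEnvironmentPrefix; module path and YAML content are assumed.
from ruamel.yaml import YAML
from bitwarden_workflow_linter.models.job import Job
from bitwarden_workflow_linter.rules.job_environment_prefix import RuleJobEnvironmentPrefix

job_data = YAML().load(
    """\
runs-on: ubuntu-22.04
env:
  _GOOD_ENV: "ok"
  BAD_ENV: "flagged"
steps:
  - run: echo test
"""
)

finding = RuleJobEnvironmentPrefix().execute(Job.init("job-key", job_data))
print(finding)  # e.g. Job [job-key] => Job environment vars should start with an underscore: (BAD_ENV)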