troubadix 25.12.4-py3-none-any.whl → 26.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. troubadix/__version__.py +1 -1
  2. troubadix/argparser.py +4 -14
  3. troubadix/helper/date_format.py +3 -7
  4. troubadix/helper/helper.py +1 -3
  5. troubadix/helper/if_block_parser.py +14 -37
  6. troubadix/helper/linguistic_exception_handler.py +6 -18
  7. troubadix/helper/patterns.py +7 -19
  8. troubadix/helper/remove_comments.py +1 -4
  9. troubadix/helper/text_utils.py +1 -3
  10. troubadix/plugin.py +3 -9
  11. troubadix/plugins/__init__.py +6 -20
  12. troubadix/plugins/badwords.py +3 -9
  13. troubadix/plugins/copyright_text.py +2 -5
  14. troubadix/plugins/copyright_year.py +4 -12
  15. troubadix/plugins/creation_date.py +3 -9
  16. troubadix/plugins/cvss_format.py +1 -3
  17. troubadix/plugins/dependencies.py +2 -5
  18. troubadix/plugins/dependency_category_order.py +7 -20
  19. troubadix/plugins/deprecated_dependency.py +6 -16
  20. troubadix/plugins/deprecated_functions.py +1 -2
  21. troubadix/plugins/double_end_points.py +2 -7
  22. troubadix/plugins/duplicate_oid.py +1 -3
  23. troubadix/plugins/forking_nasl_functions.py +1 -4
  24. troubadix/plugins/get_kb_on_services.py +2 -4
  25. troubadix/plugins/grammar.py +6 -16
  26. troubadix/plugins/http_links_in_tags.py +1 -3
  27. troubadix/plugins/illegal_characters.py +5 -13
  28. troubadix/plugins/log_messages.py +1 -2
  29. troubadix/plugins/malformed_dependencies.py +2 -6
  30. troubadix/plugins/missing_desc_exit.py +1 -3
  31. troubadix/plugins/multiple_re_parameters.py +2 -6
  32. troubadix/plugins/newlines.py +1 -2
  33. troubadix/plugins/overlong_description_lines.py +2 -6
  34. troubadix/plugins/prod_svc_detect_in_vulnvt.py +1 -4
  35. troubadix/plugins/script_add_preference_id.py +3 -10
  36. troubadix/plugins/script_add_preference_type.py +2 -7
  37. troubadix/plugins/script_calls_empty_values.py +3 -3
  38. troubadix/plugins/script_calls_recommended.py +5 -8
  39. troubadix/plugins/script_copyright.py +1 -3
  40. troubadix/plugins/script_family.py +1 -2
  41. troubadix/plugins/script_tag_form.py +1 -3
  42. troubadix/plugins/script_tag_whitespaces.py +4 -8
  43. troubadix/plugins/script_tags_mandatory.py +2 -5
  44. troubadix/plugins/script_version_and_last_modification_tags.py +6 -17
  45. troubadix/plugins/script_xref_form.py +1 -3
  46. troubadix/plugins/script_xref_url.py +3 -7
  47. troubadix/plugins/security_messages.py +6 -17
  48. troubadix/plugins/severity_date.py +3 -9
  49. troubadix/plugins/severity_format.py +1 -3
  50. troubadix/plugins/severity_origin.py +1 -3
  51. troubadix/plugins/solution_text.py +6 -10
  52. troubadix/plugins/solution_type.py +1 -2
  53. troubadix/plugins/spaces_before_dots.py +2 -8
  54. troubadix/plugins/spaces_in_filename.py +1 -2
  55. troubadix/plugins/spelling.py +5 -14
  56. troubadix/plugins/trailing_spaces_tabs.py +2 -5
  57. troubadix/plugins/using_display.py +2 -6
  58. troubadix/plugins/valid_oid.py +51 -60
  59. troubadix/plugins/valid_script_tag_names.py +2 -5
  60. troubadix/plugins/variable_assigned_in_if.py +2 -7
  61. troubadix/plugins/variable_redefinition_in_foreach.py +2 -6
  62. troubadix/plugins/vt_placement.py +2 -8
  63. troubadix/reporter.py +6 -19
  64. troubadix/results.py +2 -8
  65. troubadix/runner.py +5 -14
  66. troubadix/standalone_plugins/allowed_rev_diff.py +8 -25
  67. troubadix/standalone_plugins/changed_creation_date.py +3 -9
  68. troubadix/standalone_plugins/changed_cves.py +4 -12
  69. troubadix/standalone_plugins/changed_oid.py +2 -6
  70. troubadix/standalone_plugins/changed_packages/changed_packages.py +3 -8
  71. troubadix/standalone_plugins/changed_packages/marker/changed_update.py +1 -3
  72. troubadix/standalone_plugins/changed_packages/marker/dropped_architecture.py +1 -3
  73. troubadix/standalone_plugins/changed_packages/package.py +2 -5
  74. troubadix/standalone_plugins/dependency_graph/checks.py +5 -15
  75. troubadix/standalone_plugins/dependency_graph/dependency_graph.py +5 -13
  76. troubadix/standalone_plugins/deprecate_vts.py +3 -9
  77. troubadix/standalone_plugins/file_extensions.py +3 -10
  78. troubadix/standalone_plugins/last_modification.py +3 -9
  79. troubadix/standalone_plugins/no_solution.py +12 -32
  80. troubadix/standalone_plugins/version_updated.py +4 -12
  81. troubadix/troubadix.py +1 -4
  82. {troubadix-25.12.4.dist-info → troubadix-26.1.0.dist-info}/METADATA +1 -1
  83. troubadix-26.1.0.dist-info/RECORD +116 -0
  84. troubadix-25.12.4.dist-info/RECORD +0 -116
  85. {troubadix-25.12.4.dist-info → troubadix-26.1.0.dist-info}/WHEEL +0 -0
  86. {troubadix-25.12.4.dist-info → troubadix-26.1.0.dist-info}/entry_points.txt +0 -0
  87. {troubadix-25.12.4.dist-info → troubadix-26.1.0.dist-info}/licenses/LICENSE +0 -0
troubadix/__version__.py CHANGED
@@ -2,4 +2,4 @@

  # THIS IS AN AUTOGENERATED FILE. DO NOT TOUCH!

- __version__ = "25.12.4"
+ __version__ = "26.1.0"
troubadix/argparser.py CHANGED
@@ -98,9 +98,7 @@ def parse_args(
  "-f",
  "--full",
  action="store_true",
- help=(
- "Checking the complete VT directory and not only the added/changed scripts"
- ),
+ help=("Checking the complete VT directory and not only the added/changed scripts"),
  )

  parser.add_argument(
@@ -167,9 +165,7 @@ def parse_args(
  parser.add_argument(
  "--non-recursive",
  action="store_true",
- help=(
- 'Don\'t run the script recursive. Only usable with "-f"/"--full" or "-d"/"--dirs"'
- ),
+ help=('Don\'t run the script recursive. Only usable with "-f"/"--full" or "-d"/"--dirs"'),
  )

  parser.add_argument(
@@ -233,9 +229,7 @@ def parse_args(
  dest="n_jobs",
  default=max(1, cpu_count() // 2),
  type=check_cpu_count,
- help=(
- "Define number of jobs, that should run simultaneously. Default: %(default)s"
- ),
+ help=("Define number of jobs, that should run simultaneously. Default: %(default)s"),
  )

  parser.add_argument(
@@ -262,11 +256,7 @@ def parse_args(
  )
  sys.exit(1)

- if (
- not parsed_args.full
- and not parsed_args.dirs
- and parsed_args.non_recursive
- ):
+ if not parsed_args.full and not parsed_args.dirs and parsed_args.non_recursive:
  terminal.warning(
  "'Argument '--non-recursive' is only usable with '-f'/'--full' or '-d'/'--dirs'"
  )
troubadix/helper/date_format.py CHANGED
@@ -11,9 +11,7 @@ def parse_date(date: str) -> datetime:
  return datetime.strptime(date[:25], "%Y-%m-%d %H:%M:%S %z")


- def check_date(
- date: str, date_name: str, file: str, plugin: str
- ) -> Iterator[LinterResult]:
+ def check_date(date: str, date_name: str, file: str, plugin: str) -> Iterator[LinterResult]:
  """
  Checks if a given date string is correctly formatted.
  Example: "2017-11-29 13:56:41 +0100 (Wed, 29 Nov 2017)"
@@ -58,8 +56,7 @@ def check_date(
  elif week_day_str != week_day_parsed:
  formatted_date = week_day_parsed
  yield LinterError(
- f"Wrong day of week. Please change it from '{week_day_str}"
- f"' to '{formatted_date}'.",
+ f"Wrong day of week. Please change it from '{week_day_str}" f"' to '{formatted_date}'.",
  file=file,
  plugin=plugin,
  )
@@ -79,8 +76,7 @@ def compare_date_with_last_modification_date(
  try:
  if parse_date(date) > parse_date(last_mod_date):
  yield LinterError(
- f"The {date_name} must not be greater than "
- "last_modification date.",
+ f"The {date_name} must not be greater than " "last_modification date.",
  file=file,
  plugin=plugin,
  )
troubadix/helper/helper.py CHANGED
@@ -35,9 +35,7 @@ SCRIPT_CATEGORIES = {
  }


- def is_ignore_file(
- file_name: Union[Path, str], ignore_files: Union[List[Path], List[str]]
- ) -> bool:
+ def is_ignore_file(file_name: Union[Path, str], ignore_files: Union[List[Path], List[str]]) -> bool:
  for ignore_file in ignore_files:
  if str(ignore_file) in str(file_name):
  return True
troubadix/helper/if_block_parser.py CHANGED
@@ -31,9 +31,7 @@ class IfStatement:
  class IfErrorType(Enum):
  UNCLOSED_CONDITION = "Unclosed parenthesis in if condition at line {line}"
  UNCLOSED_BODY = "Unclosed brace in if body at line {line}"
- MISSING_OUTCOME = (
- "Missing statement or body after if condition at line {line}"
- )
+ MISSING_OUTCOME = "Missing statement or body after if condition at line {line}"
  TERMINATED_AFTER_CONDITION = (
  "Semicolon after if condition at line {line} causes if to terminate early. "
  "Following block will always execute."
@@ -85,18 +83,12 @@ class IfParser:
  opening_brace, CONDITION_BRACES
  )
  if condition_error:
- errors.append(
- IfParseError(line=line, error_type=condition_error)
- )
+ errors.append(IfParseError(line=line, error_type=condition_error))
  continue
- condition = self.file_content[
- opening_brace + 1 : condition_end
- ].strip()
+ condition = self.file_content[opening_brace + 1 : condition_end].strip()

  # Step 3: Find the start of the outcome (first non-whitespace after condition)
- outcome_start, outcome_error = self._find_outcome_start(
- condition_end
- )
+ outcome_start, outcome_error = self._find_outcome_start(condition_end)
  if outcome_error:
  errors.append(IfParseError(line=line, error_type=outcome_error))
  continue
@@ -104,26 +96,18 @@ class IfParser:
  # Step 4: Determine if this is a body or single-expression statement
  if self.file_content[outcome_start] == "{":
  # Body: find closing brace for body '}'
- body_end, body_error = self._find_closing_brace(
- outcome_start, BODY_BRACES
- )
+ body_end, body_error = self._find_closing_brace(outcome_start, BODY_BRACES)
  if body_error:
- errors.append(
- IfParseError(line=line, error_type=body_error)
- )
+ errors.append(IfParseError(line=line, error_type=body_error))
  continue
  if_end = body_end + 1
  outcome_start = outcome_start + 1 # exclude opening brace
  outcome_end = body_end
  else:
  # Single statement: find end of statement ';'
- statement_end, statement_error = self._find_statement_end(
- outcome_start
- )
+ statement_end, statement_error = self._find_statement_end(outcome_start)
  if statement_error:
- errors.append(
- IfParseError(line=line, error_type=statement_error)
- )
+ errors.append(IfParseError(line=line, error_type=statement_error))
  continue
  if_end = statement_end + 1
  outcome_end = statement_end
@@ -194,15 +178,12 @@ class IfParser:
  continue

  # check for if with word boundary, valid: ["if", " if"], not valid: "xif"
- if (
- i == 0 or not self.file_content[i - 1].isalnum()
- ) and self.file_content.startswith("if", i):
+ if (i == 0 or not self.file_content[i - 1].isalnum()) and self.file_content.startswith(
+ "if", i
+ ):
  # skip whitespace
  j = i + 2
- while (
- j < len(self.file_content)
- and self.file_content[j].isspace()
- ):
+ while j < len(self.file_content) and self.file_content[j].isspace():
  j += 1
  # check for condition start
  if j < len(self.file_content) and self.file_content[j] == "(":
@@ -210,9 +191,7 @@

  return starts

- def _find_outcome_start(
- self, condition_end: int
- ) -> tuple[int | None, IfErrorType | None]:
+ def _find_outcome_start(self, condition_end: int) -> tuple[int | None, IfErrorType | None]:
  """
  Find the start of the outcome/then part after the condition (next non-whitespace character).
  """
@@ -228,9 +207,7 @@

  return pos, None

- def _find_statement_end(
- self, statement_start: int
- ) -> tuple[int | None, IfErrorType | None]:
+ def _find_statement_end(self, statement_start: int) -> tuple[int | None, IfErrorType | None]:
  """Find the end of a single statement (semicolon outside of strings)."""
  string_state = StringState()

troubadix/helper/linguistic_exception_handler.py CHANGED
@@ -90,16 +90,12 @@ class PatternsCheck(LinguisticCheck):
  # but that throws an exception because Tuple[...]
  # is only valid for type hinting :/
  if isinstance(patterns[0], Tuple):
- self.patterns = [
- re.compile(pattern, flags=flags) for pattern, flags in patterns
- ]
+ self.patterns = [re.compile(pattern, flags=flags) for pattern, flags in patterns]
  else:
  self.patterns = [re.compile(pattern) for pattern in patterns]

  def execute(self, file_path: str, correction: str):
- return any(
- bool(pattern.search(correction)) for pattern in self.patterns
- )
+ return any(bool(pattern.search(correction)) for pattern in self.patterns)


  class CompositeCheck(LinguisticCheck):
@@ -112,9 +108,7 @@ class CompositeCheck(LinguisticCheck):
  self.checks = checks

  def execute(self, file_path: str, correction: str):
- return all(
- check.execute(file_path, correction) for check in self.checks
- )
+ return all(check.execute(file_path, correction) for check in self.checks)


  class TextInFileCheck(CompositeCheck):
@@ -131,9 +125,7 @@ class PatternInFileCheck(CompositeCheck):
  and the file contains the specified file path
  """

- def __init__(
- self, file: str, pattern: str, flags: re.RegexFlag = 0
- ) -> None:
+ def __init__(self, file: str, pattern: str, flags: re.RegexFlag = 0) -> None:
  super().__init__(FileCheck(file), PatternCheck(pattern, flags))


@@ -155,9 +147,7 @@ class PatternInFilesCheck(CompositeCheck):
  and the file matches any of the specified file paths
  """

- def __init__(
- self, files: List[str], pattern: str, flags: re.RegexFlag = 0
- ) -> None:
+ def __init__(self, files: List[str], pattern: str, flags: re.RegexFlag = 0) -> None:
  super().__init__(FilesCheck(files), PatternCheck(pattern, flags))


@@ -196,9 +186,7 @@ class PatternsInFilePatternCheck(CompositeCheck):
  )


- def handle_linguistic_checks(
- file: str, correction: str, checks: Iterable[LinguisticCheck]
- ) -> bool:
+ def handle_linguistic_checks(file: str, correction: str, checks: Iterable[LinguisticCheck]) -> bool:
  """Determinates if any of the provided checks pass
  for the provided file and correction

troubadix/helper/patterns.py CHANGED
@@ -42,9 +42,7 @@ _TAG_PATTERN = (
  )


- def _get_tag_pattern(
- name: str, *, value: str = r".+?", flags: re.RegexFlag = 0
- ) -> re.Pattern:
+ def _get_tag_pattern(name: str, *, value: str = r".+?", flags: re.RegexFlag = 0) -> re.Pattern:
  """
  The returned pattern catches all
  `script_tags(name="{name}", value="{value}");`
@@ -89,8 +87,7 @@ __script_tag_pattern = None
  __DATE_VALUE = r"[A-Za-z0-9\:\-\+\,\s\(\)]{44}"
  __CVSS_V2_BASE_VECTOR = r"AV:[LAN]/AC:[HML]/Au:[NSM]/C:[NPC]/I:[NPC]/A:[NPC]"
  __CVSS_V3_BASE_VECTOR = (
- r"CVSS:3.[01]/AV:[NALP]/AC:[LH]/PR:[NLH]/UI:[NR]/S:[UC]"
- r"/C:[HLN]/I:[HLN]/A:[HLN]"
+ r"CVSS:3.[01]/AV:[NALP]/AC:[LH]/PR:[NLH]/UI:[NR]/S:[UC]" r"/C:[HLN]/I:[HLN]/A:[HLN]"
  )
  __CVSS_V4_BASE_VECTOR = (
  r"CVSS:4.0/AV:[NALP]/AC:[LH]/AT:[NP]/PR:[NLH]/UI:[NPA]"
@@ -102,9 +99,7 @@ __script_tag_values = {
  ScriptTag.CVSS_BASE: r"(10\.0|[0-9]\.[0-9])",
  ScriptTag.CVSS_BASE_VECTOR: __CVSS_V2_BASE_VECTOR,
  ScriptTag.SEVERITY_VECTOR: (
- rf"({__CVSS_V2_BASE_VECTOR})|"
- rf"({__CVSS_V3_BASE_VECTOR})|"
- rf"({__CVSS_V4_BASE_VECTOR})"
+ rf"({__CVSS_V2_BASE_VECTOR})|" rf"({__CVSS_V3_BASE_VECTOR})|" rf"({__CVSS_V4_BASE_VECTOR})"
  ),
  ScriptTag.SEVERITY_ORIGIN: r"(NVD|Vendor|Third Party|Greenbone)",
  ScriptTag.SEVERITY_DATE: __DATE_VALUE,
@@ -127,9 +122,7 @@ def init_script_tag_patterns() -> None:
  value = r".+?"
  flags = re.MULTILINE | re.DOTALL

- __script_tag_pattern[tag] = _get_tag_pattern(
- name=tag.value, value=value, flags=flags
- )
+ __script_tag_pattern[tag] = _get_tag_pattern(name=tag.value, value=value, flags=flags)


  def get_script_tag_patterns() -> Dict[ScriptTag, re.Pattern]:
@@ -162,9 +155,7 @@ _XREF_TAG_PATTERN = (
  )


- def get_xref_pattern(
- name: str, *, value: str = r".+?", flags: re.RegexFlag = 0
- ) -> re.Pattern:
+ def get_xref_pattern(name: str, *, value: str = r".+?", flags: re.RegexFlag = 0) -> re.Pattern:
  """
  The returned pattern catches all
  `script_xref(name="{name}", value="{value}");`
@@ -186,8 +177,7 @@ def get_xref_pattern(


  _SPECIAL_TAG_PATTERN = (
- r'script_(?P<name>{name})\s*\((?P<quote99>[\'"])?(?P<value>{value})'
- r"(?P=quote99)?\s*\)\s*;"
+ r'script_(?P<name>{name})\s*\((?P<quote99>[\'"])?(?P<value>{value})' r"(?P=quote99)?\s*\)\s*;"
  )


@@ -228,9 +218,7 @@ def _get_special_script_tag_pattern(
  Returns
  `re.Pattern` object
  """
- return re.compile(
- _SPECIAL_TAG_PATTERN.format(name=name, value=value), flags=flags
- )
+ return re.compile(_SPECIAL_TAG_PATTERN.format(name=name, value=value), flags=flags)


  __PORT_VALUE = r"\"(?P<service>[\w\s])+\", (?P<port>\d{1,5})"
troubadix/helper/remove_comments.py CHANGED
@@ -20,10 +20,7 @@ def remove_comments(file_content: str) -> str:
  """
  string_state = StringState()
  return "\n".join(
- [
- _remove_comments_in_line(line, string_state)
- for line in file_content.splitlines()
- ]
+ [_remove_comments_in_line(line, string_state) for line in file_content.splitlines()]
  )

troubadix/helper/text_utils.py CHANGED
@@ -55,9 +55,7 @@ def index_to_linecol(text: str, index: int) -> tuple[int, int]:
  (line, column) tuple (both start at 1)
  """
  if index < 0 or index >= len(text):
- raise ValueError(
- f"Index {index} out of bounds for text of length {len(text)}"
- )
+ raise ValueError(f"Index {index} out of bounds for text of length {len(text)}")

  lines = text.splitlines(keepends=True)
  line_num = 0
troubadix/plugin.py CHANGED
@@ -62,9 +62,7 @@ class FilePluginContext:
  @property
  def file_content(self) -> str:
  if not self._file_content:
- self._file_content = self.nasl_file.read_text(
- encoding=CURRENT_ENCODING
- )
+ self._file_content = self.nasl_file.read_text(encoding=CURRENT_ENCODING)
  return self._file_content

  @property
@@ -111,14 +109,10 @@ class FileContentPlugin(FilePlugin):
  """A plugin that does checks on the whole file content"""

  def run(self) -> Iterator[LinterResult]:
- return self.check_content(
- self.context.nasl_file, self.context.file_content
- )
+ return self.check_content(self.context.nasl_file, self.context.file_content)

  @abstractmethod
- def check_content(
- self, nasl_file: Path, file_content: str
- ) -> Iterator[LinterResult]:
+ def check_content(self, nasl_file: Path, file_content: str) -> Iterator[LinterResult]:
  pass

troubadix/plugins/__init__.py CHANGED
@@ -191,24 +191,18 @@ class StandardPlugins(Plugins):
  self._check_unknown_plugins(excluded_plugins)

  file_plugins = self._exclude_plugins(excluded_plugins, file_plugins)
- files_plugins = self._exclude_plugins(
- excluded_plugins, files_plugins
- )
+ files_plugins = self._exclude_plugins(excluded_plugins, files_plugins)

  if included_plugins:
  self._check_unknown_plugins(included_plugins)

  file_plugins = self._include_plugins(included_plugins, file_plugins)
- files_plugins = self._include_plugins(
- included_plugins, files_plugins
- )
+ files_plugins = self._include_plugins(included_plugins, files_plugins)

  super().__init__(file_plugins=file_plugins, files_plugins=files_plugins)

  @staticmethod
- def _exclude_plugins(
- excluded: Iterable[str], plugins: Iterable[Plugin]
- ) -> List[Plugin]:
+ def _exclude_plugins(excluded: Iterable[str], plugins: Iterable[Plugin]) -> List[Plugin]:
  return [
  plugin
  for plugin in plugins
@@ -216,13 +210,9 @@ class StandardPlugins(Plugins):
  ]

  @staticmethod
- def _include_plugins(
- included: Iterable[str], plugins: Iterable[Plugin]
- ) -> List[Plugin]:
+ def _include_plugins(included: Iterable[str], plugins: Iterable[Plugin]) -> List[Plugin]:
  return [
- plugin
- for plugin in plugins
- if plugin.__name__ in included or plugin.name in included
+ plugin for plugin in plugins if plugin.__name__ in included or plugin.name in included
  ]

  @staticmethod
@@ -240,11 +230,7 @@ class StandardPlugins(Plugins):

  def build_message(plugin: str):
  match = difflib.get_close_matches(plugin, all_plugin_names, n=1)
- return (
- f"'{plugin}' (Did you mean '{match[0]}'?)"
- if match
- else f"'{plugin}'"
- )
+ return f"'{plugin}' (Did you mean '{match[0]}'?)" if match else f"'{plugin}'"

  messages = [build_message(plugin) for plugin in sorted(unknown_plugins)]
  raise ValueError(f"Unknown plugins: {', '.join(messages)}")
troubadix/plugins/badwords.py CHANGED
@@ -140,20 +140,14 @@ class CheckBadwords(LineContentPlugin):
  if any(badword in line for badword in DEFAULT_BADWORDS):
  if (
  not any(exception in line for exception in EXCEPTIONS)
+ and not any(line.startswith(start) for start in STARTS_WITH_EXCEPTIONS)
  and not any(
- line.startswith(start)
- for start in STARTS_WITH_EXCEPTIONS
- )
- and not any(
- nasl_file.name == filename and value in line
- for filename, value in COMBINED
+ nasl_file.name == filename and value in line for filename, value in COMBINED
  )
  ):
  report = f"Badword in line {i:5}: {line}"
  if "NVT" in line:
- report += (
- '\nNote/Hint: Please use the term "VT" instead.'
- )
+ report += '\nNote/Hint: Please use the term "VT" instead.'
  yield LinterError(
  report,
  plugin=self.name,
troubadix/plugins/copyright_text.py CHANGED
@@ -98,13 +98,10 @@ class CheckCopyrightText(FileContentPlugin):
  return

  nasl_file = self.context.nasl_file
- nasl_file.write_text(
- data=self.new_file_content, encoding=CURRENT_ENCODING
- )
+ nasl_file.write_text(data=self.new_file_content, encoding=CURRENT_ENCODING)

  yield LinterFix(
- f"The copyright statement has been updated to "
- f"{CORRECT_COPYRIGHT_PHRASE}",
+ f"The copyright statement has been updated to " f"{CORRECT_COPYRIGHT_PHRASE}",
  file=nasl_file,
  plugin=self.name,
  )
troubadix/plugins/copyright_year.py CHANGED
@@ -59,12 +59,8 @@ class CheckCopyrightYear(FileContentPlugin):

  name = "check_copyright_year"

- def check_content(
- self, nasl_file: Path, file_content: str
- ) -> Iterator[LinterResult]:
- if nasl_file.suffix == ".inc" or is_ignore_file(
- nasl_file, _IGNORE_FILES
- ):
+ def check_content(self, nasl_file: Path, file_content: str) -> Iterator[LinterResult]:
+ if nasl_file.suffix == ".inc" or is_ignore_file(nasl_file, _IGNORE_FILES):
  return
  # extract creation year from script tag
  creation_date_pattern = get_script_tag_pattern(ScriptTag.CREATION_DATE)
@@ -79,9 +75,7 @@ class CheckCopyrightYear(FileContentPlugin):
  creation_year = int(creation_date_match.group("value")[:4])

  # extract year in value of script_copyright tag
- script_copyright_pattern = get_special_script_tag_pattern(
- SpecialScriptTag.COPYRIGHT
- )
+ script_copyright_pattern = get_special_script_tag_pattern(SpecialScriptTag.COPYRIGHT)
  script_copyright_match = script_copyright_pattern.search(file_content)
  if not script_copyright_match:
  yield LinterError(
@@ -91,9 +85,7 @@ class CheckCopyrightYear(FileContentPlugin):
  )
  return
  copyright_tag_value = script_copyright_match.group("value")
- copyright_tag_match = SPDX_OR_COPYRIGHT_PATTERN.search(
- copyright_tag_value
- )
+ copyright_tag_match = SPDX_OR_COPYRIGHT_PATTERN.search(copyright_tag_value)
  if not copyright_tag_match:
  yield LinterError(
  "Unable to extract year from script_copyright tag in VT",
troubadix/plugins/creation_date.py CHANGED
@@ -41,13 +41,9 @@ class CheckCreationDate(FileContentPlugin):
  return

  creation_date_pattern = get_script_tag_pattern(ScriptTag.CREATION_DATE)
- last_modification_pattern = get_script_tag_pattern(
- ScriptTag.LAST_MODIFICATION
- )
+ last_modification_pattern = get_script_tag_pattern(ScriptTag.LAST_MODIFICATION)

- if not (
- match_creation_date := creation_date_pattern.search(file_content)
- ):
+ if not (match_creation_date := creation_date_pattern.search(file_content)):
  yield LinterError(
  "No creation_date has been found.",
  file=nasl_file,
@@ -62,9 +58,7 @@ class CheckCreationDate(FileContentPlugin):
  self.name,
  )

- if match_last_mod_date := last_modification_pattern.search(
- file_content
- ):
+ if match_last_mod_date := last_modification_pattern.search(file_content):
  yield from compare_date_with_last_modification_date(
  match_creation_date.group("value"),
  "creation_date",
troubadix/plugins/cvss_format.py CHANGED
@@ -37,9 +37,7 @@ class CheckCVSSFormat(FileContentPlugin):
  return

  cvss_base_pattern = get_script_tag_pattern(ScriptTag.CVSS_BASE)
- cvss_base_vector_pattern = get_script_tag_pattern(
- ScriptTag.CVSS_BASE_VECTOR
- )
+ cvss_base_vector_pattern = get_script_tag_pattern(ScriptTag.CVSS_BASE_VECTOR)

  if not cvss_base_pattern.search(file_content):
  yield LinterError(
troubadix/plugins/dependencies.py CHANGED
@@ -75,12 +75,9 @@ class CheckDependencies(FilePlugin):
  for match in matches:
  if match:
  for dep in split_dependencies(match.group("value")):
- if not any(
- (root / vers / dep).exists() for vers in FEED_VERSIONS
- ):
+ if not any((root / vers / dep).exists() for vers in FEED_VERSIONS):
  yield LinterError(
- f"The script dependency {dep} could not "
- "be found within the VTs.",
+ f"The script dependency {dep} could not " "be found within the VTs.",
  file=self.context.nasl_file,
  plugin=self.name,
  )
troubadix/plugins/dependency_category_order.py CHANGED
@@ -61,17 +61,13 @@ def check_category(
  match = pattern.search(content)

  if not match:
- raise CategoryError(
- f"{script}: Script category is missing or unsupported."
- )
+ raise CategoryError(f"{script}: Script category is missing or unsupported.")

  category_value = match.group("value")
  try:
  return VTCategory[category_value]
  except ValueError:
- raise CategoryError(
- f"{script}: Script category {category_value} is unsupported."
- ) from None
+ raise CategoryError(f"{script}: Script category {category_value} is unsupported.") from None


  class CheckDependencyCategoryOrder(FileContentPlugin):
@@ -98,9 +94,7 @@ class CheckDependencyCategoryOrder(FileContentPlugin):
  ):
  return

- category_pattern = get_special_script_tag_pattern(
- SpecialScriptTag.CATEGORY
- )
+ category_pattern = get_special_script_tag_pattern(SpecialScriptTag.CATEGORY)

  try:
  category = check_category(
@@ -116,9 +110,7 @@ class CheckDependencyCategoryOrder(FileContentPlugin):
  )
  return

- dependencies_pattern = get_special_script_tag_pattern(
- SpecialScriptTag.DEPENDENCIES
- )
+ dependencies_pattern = get_special_script_tag_pattern(SpecialScriptTag.DEPENDENCIES)
  matches = dependencies_pattern.finditer(file_content)

  if not matches:
@@ -130,9 +122,7 @@ class CheckDependencyCategoryOrder(FileContentPlugin):
  if match:
  # Remove single and/or double quotes, spaces
  # and create a list by using the comma as a separator
- dependencies = re.sub(
- r'[\'"\s]', "", match.group("value")
- ).split(",")
+ dependencies = re.sub(r'[\'"\s]', "", match.group("value")).split(",")

  for dep in dependencies:
  dependency_path = None
@@ -142,15 +132,12 @@ class CheckDependencyCategoryOrder(FileContentPlugin):

  if not dependency_path:
  yield LinterError(
- f"The script dependency {dep} could not "
- "be found within the VTs.",
+ f"The script dependency {dep} could not " "be found within the VTs.",
  file=nasl_file,
  plugin=self.name,
  )
  else:
- dependency_content = dependency_path.read_text(
- encoding=CURRENT_ENCODING
- )
+ dependency_content = dependency_path.read_text(encoding=CURRENT_ENCODING)

  try:
  dependency_category = check_category(