invar-tools 1.17.25__py3-none-any.whl → 1.17.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. invar/core/contracts.py +38 -6
  2. invar/core/doc_edit.py +22 -9
  3. invar/core/doc_parser.py +17 -11
  4. invar/core/entry_points.py +48 -42
  5. invar/core/extraction.py +25 -9
  6. invar/core/format_specs.py +7 -2
  7. invar/core/format_strategies.py +6 -4
  8. invar/core/formatter.py +9 -6
  9. invar/core/hypothesis_strategies.py +24 -5
  10. invar/core/lambda_helpers.py +2 -2
  11. invar/core/parser.py +40 -21
  12. invar/core/patterns/detector.py +25 -6
  13. invar/core/patterns/p0_exhaustive.py +5 -5
  14. invar/core/patterns/p0_literal.py +8 -8
  15. invar/core/patterns/p0_newtype.py +2 -2
  16. invar/core/patterns/p0_nonempty.py +12 -7
  17. invar/core/patterns/p0_validation.py +4 -8
  18. invar/core/patterns/registry.py +12 -2
  19. invar/core/property_gen.py +47 -23
  20. invar/core/shell_analysis.py +70 -66
  21. invar/core/strategies.py +14 -3
  22. invar/core/suggestions.py +12 -4
  23. invar/core/tautology.py +33 -10
  24. invar/core/template_parser.py +23 -15
  25. invar/core/ts_parsers.py +6 -2
  26. invar/core/ts_sig_parser.py +18 -10
  27. invar/core/utils.py +38 -12
  28. invar/shell/property_tests.py +176 -48
  29. invar/templates/protocol/python/tools.md +3 -0
  30. {invar_tools-1.17.25.dist-info → invar_tools-1.17.27.dist-info}/METADATA +1 -1
  31. {invar_tools-1.17.25.dist-info → invar_tools-1.17.27.dist-info}/RECORD +36 -36
  32. {invar_tools-1.17.25.dist-info → invar_tools-1.17.27.dist-info}/WHEEL +0 -0
  33. {invar_tools-1.17.25.dist-info → invar_tools-1.17.27.dist-info}/entry_points.txt +0 -0
  34. {invar_tools-1.17.25.dist-info → invar_tools-1.17.27.dist-info}/licenses/LICENSE +0 -0
  35. {invar_tools-1.17.25.dist-info → invar_tools-1.17.27.dist-info}/licenses/LICENSE-GPL +0 -0
  36. {invar_tools-1.17.25.dist-info → invar_tools-1.17.27.dist-info}/licenses/NOTICE +0 -0
invar/core/strategies.py CHANGED
@@ -111,7 +111,10 @@ PATTERNS: list[tuple[str, Callable[[re.Match, str], dict[str, Any] | None]]] = [
     ),
     (
         r"(-?[\d.]+(?:e[+-]?\d+)?)\s*<=\s*(\w+)\s*<=\s*(-?[\d.]+(?:e[+-]?\d+)?)",
-        lambda m, p: {"min_value": _parse_number(m.group(1)), "max_value": _parse_number(m.group(3))}
+        lambda m, p: {
+            "min_value": _parse_number(m.group(1)),
+            "max_value": _parse_number(m.group(3)),
+        }
         if m.group(2) == p
         else None,
     ),
@@ -140,7 +143,11 @@ PATTERNS: list[tuple[str, Callable[[re.Match, str], dict[str, Any] | None]]] = [
 ]


-@pre(lambda pre_source, param_name, param_type=None: len(param_name) > 0)  # Param must be named
+@pre(
+    lambda pre_source, param_name, param_type=None: isinstance(pre_source, str)
+    and len(param_name) > 0
+    and (param_type is None or isinstance(param_type, type))
+)  # Param must be named
 @post(lambda result: isinstance(result.constraints, dict))  # Returns valid hint
 def infer_from_lambda(
     pre_source: str,
@@ -193,7 +200,11 @@ def infer_from_lambda(
     )


-@pre(lambda pre_sources, param_name, param_type=None: len(param_name) > 0)  # Param must be named
+@pre(
+    lambda pre_sources, param_name, param_type=None: isinstance(pre_sources, list)
+    and len(param_name) > 0
+    and (param_type is None or isinstance(param_type, type))
+)  # Param must be named
 @post(lambda result: isinstance(result.constraints, dict))  # Returns valid hint
 def infer_from_multiple(
     pre_sources: list[str],
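Both hunks in this file touch the same mechanism: a table of (regex, handler) pairs that turns precondition text such as `0 <= x <= 1` into Hypothesis-style constraint keywords for a named parameter. A minimal, self-contained sketch of that idea follows, assuming a simplified `_parse_number` and an illustrative `scan_constraints` helper (not invar's actual API):

# Illustrative sketch (not invar's code): a (regex, handler) table that maps
# precondition text like "0 <= x <= 1" to Hypothesis-style constraint kwargs.
import re
from typing import Any, Callable

def _parse_number(text: str) -> float:  # simplified stand-in for the real helper
    return float(text)

RANGE_PATTERNS: list[tuple[str, Callable[[re.Match, str], dict[str, Any] | None]]] = [
    (
        r"(-?[\d.]+(?:e[+-]?\d+)?)\s*<=\s*(\w+)\s*<=\s*(-?[\d.]+(?:e[+-]?\d+)?)",
        lambda m, p: {
            "min_value": _parse_number(m.group(1)),
            "max_value": _parse_number(m.group(3)),
        }
        if m.group(2) == p
        else None,
    ),
]

def scan_constraints(pre_source: str, param_name: str) -> dict[str, Any]:
    """Collect merged constraints for one parameter (hypothetical helper)."""
    constraints: dict[str, Any] = {}
    for pattern, handler in RANGE_PATTERNS:
        for match in re.finditer(pattern, pre_source):
            hint = handler(match, param_name)
            if hint:
                constraints.update(hint)
    return constraints

# scan_constraints("0 <= ratio <= 1", "ratio") -> {"min_value": 0.0, "max_value": 1.0}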
invar/core/suggestions.py CHANGED
@@ -35,7 +35,7 @@ CONSTRAINT_PATTERNS: dict[str, list[str]] = {
 # Return-type-aware @post patterns for redundant_type_contract suggestions
 RETURN_TYPE_POST_PATTERNS: dict[str, str] = {
     "list[Violation]": '@post(lambda result: all(v.rule == "RULE_NAME" for v in result))',
-    "list": '@post(lambda result: all(<predicate> for item in result))',
+    "list": "@post(lambda result: all(<predicate> for item in result))",
     "dict": "@post(lambda result: all(isinstance(k, <type>) for k in result))",
     "set": "@post(lambda result: all(<predicate> for item in result))",
     "int": "@post(lambda result: result >= 0)",
@@ -331,11 +331,18 @@ _VIOLATION_PREFIXES = {
     "missing_contract": ("Add: ", "Add: "),
     "empty_contract": ("Replace with: ", "Replace with: "),
     "redundant_type_contract": ("Replace with business logic: ", "Replace with: "),
-    "semantic_tautology": ("Replace tautology with meaningful constraint: ", "Replace tautology with: "),
+    "semantic_tautology": (
+        "Replace tautology with meaningful constraint: ",
+        "Replace tautology with: ",
+    ),
 }


-@pre(lambda prefix, suggestion, patterns: bool(prefix) and bool(suggestion))
+@pre(
+    lambda prefix, suggestion, patterns: bool(prefix)
+    and bool(suggestion)
+    and isinstance(patterns, str)
+)
 @post(lambda result: isinstance(result, str) and len(result) > 0)
 def _format_with_patterns(prefix: str, suggestion: str, patterns: str) -> str:
     """Format suggestion with optional patterns.
@@ -350,7 +357,8 @@ def _format_with_patterns(prefix: str, suggestion: str, patterns: str) -> str:


 @pre(
-    lambda symbol, violation_type: violation_type
+    lambda symbol, violation_type: symbol is not None
+    and violation_type
     in ("missing_contract", "empty_contract", "redundant_type_contract", "semantic_tautology", "")
 )
 def format_suggestion_for_violation(symbol: Symbol, violation_type: str) -> str:
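The hunks above show suggestion strings being assembled from two lookup tables: `RETURN_TYPE_POST_PATTERNS`, keyed by return type, and `_VIOLATION_PREFIXES`, keyed by violation type with long and short prefix variants. A simplified sketch of that composition, with a hypothetical `suggest` helper standing in for invar's real formatting functions:

# Simplified sketch (not invar's implementation): a suggestion is a violation-type
# prefix joined with a return-type template.
RETURN_TYPE_POST_PATTERNS = {
    "list": "@post(lambda result: all(<predicate> for item in result))",
    "int": "@post(lambda result: result >= 0)",
}

VIOLATION_PREFIXES = {
    "missing_contract": ("Add: ", "Add: "),
    "semantic_tautology": (
        "Replace tautology with meaningful constraint: ",
        "Replace tautology with: ",
    ),
}

def suggest(violation_type: str, return_type: str, verbose: bool = True) -> str:
    """Hypothetical helper: pick a prefix and a template, then join them."""
    long_prefix, short_prefix = VIOLATION_PREFIXES.get(violation_type, ("Add: ", "Add: "))
    template = RETURN_TYPE_POST_PATTERNS.get(return_type, "@post(lambda result: <predicate>)")
    return (long_prefix if verbose else short_prefix) + template

# suggest("semantic_tautology", "int")
# -> 'Replace tautology with meaningful constraint: @post(lambda result: result >= 0)'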
invar/core/tautology.py CHANGED
@@ -61,7 +61,13 @@ def is_semantic_tautology(expression: str) -> tuple[bool, str]:

     # DX-38 Tier 1: Check for no-parameter lambda
     args = lambda_node.args
-    if not args.args and not args.posonlyargs and not args.kwonlyargs and not args.vararg and not args.kwarg:
+    if (
+        not args.args
+        and not args.posonlyargs
+        and not args.kwonlyargs
+        and not args.vararg
+        and not args.kwarg
+    ):
         return (True, "contract has no parameters (doesn't validate function inputs)")

     return _check_tautology_patterns(lambda_node.body)
@@ -92,9 +98,14 @@ def _check_comparison_patterns(node: ast.expr) -> tuple[bool, str] | None:
     # Length non-negative pattern
     if len(node.comparators) == 1:
         left, op, right = node.left, node.ops[0], node.comparators[0]
-        if (isinstance(left, ast.Call) and isinstance(left.func, ast.Name) and
-            left.func.id == "len" and isinstance(op, ast.GtE) and
-            isinstance(right, ast.Constant) and right.value == 0):
+        if (
+            isinstance(left, ast.Call)
+            and isinstance(left.func, ast.Name)
+            and left.func.id == "len"
+            and isinstance(op, ast.GtE)
+            and isinstance(right, ast.Constant)
+            and right.value == 0
+        ):
             arg = ast.unparse(left.args[0]) if left.args else "x"
             return (True, f"len({arg}) >= 0 is always True for any sequence")
     return None
@@ -103,8 +114,12 @@
 @pre(lambda node: isinstance(node, ast.expr))
 def _check_isinstance_object(node: ast.expr) -> tuple[bool, str] | None:
     """Check for isinstance(x, object) pattern."""
-    if (isinstance(node, ast.Call) and isinstance(node.func, ast.Name) and
-        node.func.id == "isinstance" and len(node.args) == 2):
+    if (
+        isinstance(node, ast.Call)
+        and isinstance(node.func, ast.Name)
+        and node.func.id == "isinstance"
+        and len(node.args) == 2
+    ):
         type_arg = node.args[1]
         if isinstance(type_arg, ast.Name) and type_arg.id == "object":
             return (True, f"isinstance({ast.unparse(node.args[0])}, object) is always True")
@@ -139,7 +154,7 @@ def _check_boolop_patterns(node: ast.expr) -> tuple[bool, str] | None:
     return None


-@pre(lambda node: isinstance(node, ast.expr) and hasattr(node, '__class__'))
+@pre(lambda node: isinstance(node, ast.expr) and hasattr(node, "__class__"))
 @post(lambda result: isinstance(result, tuple) and len(result) == 2)
 def _check_tautology_patterns(node: ast.expr) -> tuple[bool, str]:
     """Check for common tautology patterns in AST node.
@@ -157,15 +172,23 @@ def _check_tautology_patterns(node: ast.expr) -> tuple[bool, str]:
     >>> _check_tautology_patterns(ast.Constant(value=False))
     (True, 'contract always returns False (contradiction - will always fail)')
     """
-    for checker in [_check_literal_patterns, _check_comparison_patterns,
-                    _check_isinstance_object, _check_boolop_patterns]:
+    for checker in [
+        _check_literal_patterns,
+        _check_comparison_patterns,
+        _check_isinstance_object,
+        _check_boolop_patterns,
+    ]:
         result = checker(node)
         if result:
             return result
     return (False, "")


-@pre(lambda file_info, config: len(file_info.path) > 0)
+@pre(
+    lambda file_info, config: file_info is not None
+    and len(file_info.path) > 0
+    and config is not None
+)
 def check_semantic_tautology(file_info: FileInfo, config: RuleConfig) -> list[Violation]:
     """Check for semantic tautology contracts. Core files only.
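These hunks reformat a family of AST checks that flag contracts which can never fail: no-parameter lambdas, literal True/False bodies, `len(x) >= 0`, and `isinstance(x, object)`. A standalone sketch of two of those checks, assuming a hypothetical `is_tautology` entry point that collapses what the module splits across several `_check_*` helpers:

# Standalone sketch; is_tautology is an illustrative name, not the module's API.
import ast

def is_tautology(lambda_source: str) -> tuple[bool, str]:
    node = ast.parse(lambda_source, mode="eval").body
    if not isinstance(node, ast.Lambda):
        return (False, "")
    args = node.args
    if (
        not args.args
        and not args.posonlyargs
        and not args.kwonlyargs
        and not args.vararg
        and not args.kwarg
    ):
        return (True, "contract has no parameters")
    body = node.body
    if isinstance(body, ast.Constant) and isinstance(body.value, bool):
        # Literal True/False bodies never constrain the inputs.
        return (True, f"contract always returns {body.value}")
    if (
        isinstance(body, ast.Call)
        and isinstance(body.func, ast.Name)
        and body.func.id == "isinstance"
        and len(body.args) == 2
        and isinstance(body.args[1], ast.Name)
        and body.args[1].id == "object"
    ):
        # Everything is an instance of object, so this check is vacuous.
        return (True, "isinstance(..., object) is always True")
    return (False, "")

# is_tautology("lambda: True")                    -> (True, 'contract has no parameters')
# is_tautology("lambda x: isinstance(x, object)") -> (True, 'isinstance(..., object) is always True')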
 
invar/core/template_parser.py CHANGED
@@ -73,9 +73,7 @@ class ParsedFile:
 # Patterns for region markers
 # <!--invar:managed version="5.0"-->
 # <!--/invar:managed-->
-REGION_START_PATTERN = re.compile(
-    r'<!--invar:(\w+)(?:\s+version=["\']([^"\']+)["\'])?-->'
-)
+REGION_START_PATTERN = re.compile(r'<!--invar:(\w+)(?:\s+version=["\']([^"\']+)["\'])?-->')
 REGION_END_PATTERN = re.compile(r"<!--/invar:(\w+)-->")


@@ -157,10 +155,15 @@ def parse_invar_regions(content: str) -> ParsedFile:
     return ParsedFile(regions=regions, before=before, after=after, raw=content)


-@pre(lambda parsed, updates: all(k == v.name for k, v in parsed.regions.items()))  # Keys must match names
-@ensure(lambda parsed, updates, result: (
-    not parsed.has_regions or all(f"<!--invar:{r}" in result for r in parsed.regions)
-))  # Checks start tag prefix (version attribute may follow)
+@pre(
+    lambda parsed, updates: isinstance(updates, dict)
+    and all(k == v.name for k, v in parsed.regions.items())
+)  # Keys must match names
+@ensure(
+    lambda parsed, updates, result: (
+        not parsed.has_regions or all(f"<!--invar:{r}" in result for r in parsed.regions)
+    )
+)  # Checks start tag prefix (version attribute may follow)
 def reconstruct_file(parsed: ParsedFile, updates: dict[str, str]) -> str:
     """Reconstruct file content with updated regions.

@@ -359,11 +362,16 @@ def detect_claude_md_state(content: str) -> ClaudeMdState:
     project_complete = has_project_open and has_project_close

     # All markers present
-    any_marker = any([
-        has_managed_open, has_managed_close,
-        has_user_open, has_user_close,
-        has_project_open, has_project_close,
-    ])
+    any_marker = any(
+        [
+            has_managed_open,
+            has_managed_close,
+            has_user_open,
+            has_user_close,
+            has_project_open,
+            has_project_close,
+        ]
+    )

     if not any_marker:
         return ClaudeMdState(state="missing")
@@ -424,13 +432,13 @@ def strip_invar_markers(content: str) -> str:
     """
     # Remove all <!--invar:xxx--> and <!--/invar:xxx--> markers
     # Also handle version attribute
-    cleaned = re.sub(r'<!--/?invar:\w+[^>]*-->', '', content)
+    cleaned = re.sub(r"<!--/?invar:\w+[^>]*-->", "", content)
     # Clean up excessive blank lines
-    cleaned = re.sub(r'\n{3,}', '\n\n', cleaned)
+    cleaned = re.sub(r"\n{3,}", "\n\n", cleaned)
     return cleaned.strip()


-@pre(lambda content, merge_date: len(content) > 0)
+@pre(lambda content, merge_date="": len(content) > 0 and isinstance(merge_date, str))
 @post(lambda result: "MERGED CONTENT" in result)
 def format_preserved_content(content: str, merge_date: str = "") -> str:
     """Format preserved content with review markers.
invar/core/ts_parsers.py CHANGED
@@ -115,7 +115,9 @@ def parse_tsc_output(output: str) -> list[TSViolation]:
     return violations


-@pre(lambda output, base_path="": output is not None)  # Accepts any string including empty
+@pre(
+    lambda output, base_path="": output is not None and isinstance(base_path, str)
+)  # Accepts any string including empty
 @post(lambda result: all(v.source == "eslint" for v in result))
 def parse_eslint_json(output: str, base_path: str = "") -> list[TSViolation]:
     """Parse ESLint JSON output into violations list.
@@ -194,7 +196,9 @@ def parse_eslint_json(output: str, base_path: str = "") -> list[TSViolation]:
     return violations


-@pre(lambda output, base_path="": output is not None)  # Accepts any string including empty
+@pre(
+    lambda output, base_path="": output is not None and isinstance(base_path, str)
+)  # Accepts any string including empty
 @post(lambda result: all(v.source == "vitest" for v in result))
 def parse_vitest_json(output: str, base_path: str = "") -> list[TSViolation]:
     """Parse Vitest JSON output into violations list.
invar/core/ts_sig_parser.py CHANGED
@@ -188,7 +188,9 @@ def extract_ts_signatures(source: str) -> list[TSSymbol]:
         # Find actual class line (skip decorators)
         match_text = match.group(0)
         class_keyword_pos = match_text.find("class ")
-        actual_start = match.start() + class_keyword_pos if class_keyword_pos >= 0 else match.start()
+        actual_start = (
+            match.start() + class_keyword_pos if class_keyword_pos >= 0 else match.start()
+        )
         line = get_line_number(actual_start)
         line_content = lines[line - 1].strip() if line <= len(lines) else ""
         signature = line_content.rstrip("{").rstrip()
@@ -241,11 +243,13 @@ def extract_ts_signatures(source: str) -> list[TSSymbol]:
     return symbols


-@pre(lambda symbols, file_path="": all(s.line > 0 for s in symbols))  # All symbols have valid line numbers
+@pre(
+    lambda symbols, file_path="": isinstance(symbols, list)
+    and isinstance(file_path, str)
+    and all(s.line > 0 for s in symbols)
+)  # All symbols have valid line numbers
 @post(lambda result: "file" in result and "symbols" in result)
-def format_ts_signatures_json(
-    symbols: list[TSSymbol], file_path: str = ""
-) -> dict:
+def format_ts_signatures_json(symbols: list[TSSymbol], file_path: str = "") -> dict:
     """Format TypeScript symbols as JSON output.

     Args:
@@ -277,11 +281,13 @@ def format_ts_signatures_json(
     }


-@pre(lambda symbols, file_path="": all(s.line > 0 for s in symbols))  # All symbols have valid line numbers
+@pre(
+    lambda symbols, file_path="": isinstance(symbols, list)
+    and isinstance(file_path, str)
+    and all(s.line > 0 for s in symbols)
+)  # All symbols have valid line numbers
 @post(lambda result: len(result) > 0)  # Always produces output (at least header)
-def format_ts_signatures_text(
-    symbols: list[TSSymbol], file_path: str = ""
-) -> str:
+def format_ts_signatures_text(symbols: list[TSSymbol], file_path: str = "") -> str:
     """Format TypeScript symbols as human-readable text.

     Args:
@@ -303,7 +309,9 @@
         lines.append(f" {symbol.signature}")
         if symbol.docstring:
             # Truncate long docstrings
-            doc = symbol.docstring[:100] + "..." if len(symbol.docstring) > 100 else symbol.docstring
+            doc = (
+                symbol.docstring[:100] + "..." if len(symbol.docstring) > 100 else symbol.docstring
+            )
             lines.append(f" /** {doc} */")
         lines.append("")
 
invar/core/utils.py CHANGED
@@ -15,7 +15,11 @@ from deal import post, pre
 from invar.core.models import GuardReport, RuleConfig, RuleExclusion


-@pre(lambda report, strict: report.files_checked >= 0 and report.errors >= 0)
+@pre(
+    lambda report, strict: report.files_checked >= 0
+    and report.errors >= 0
+    and isinstance(strict, bool)
+)
 @post(lambda result: result in (0, 1))
 def get_exit_code(report: GuardReport, strict: bool) -> int:
     """
@@ -37,7 +41,17 @@ def get_exit_code(report: GuardReport, strict: bool) -> int:
     return 0


-@pre(lambda report, strict, doctest_passed=True, crosshair_passed=True, property_passed=True: report.files_checked >= 0)
+@pre(
+    lambda report,
+    strict,
+    doctest_passed=True,
+    crosshair_passed=True,
+    property_passed=True: report.files_checked >= 0
+    and isinstance(strict, bool)
+    and isinstance(doctest_passed, bool)
+    and isinstance(crosshair_passed, bool)
+    and isinstance(property_passed, bool)
+)
 @post(lambda result: result in ("passed", "failed"))
 def get_combined_status(
     report: GuardReport,
@@ -83,7 +97,10 @@ def get_combined_status(
     return "passed"


-@pre(lambda data, source: source in ("pyproject", "invar", "invar_dir", "default"))
+@pre(
+    lambda data, source: isinstance(data, dict)
+    and source in ("pyproject", "invar", "invar_dir", "default")
+)
 @post(lambda result: isinstance(result, dict))
 def extract_guard_section(data: dict[str, Any], source: str) -> dict[str, Any]:
     """
@@ -115,7 +132,7 @@ def extract_guard_section(data: dict[str, Any], source: str) -> dict[str, Any]:
     return result if isinstance(result, dict) else {}


-@pre(lambda config, key: len(key) > 0)
+@pre(lambda config, key: isinstance(config, dict) and len(key) > 0)
 @post(lambda result: result is None or isinstance(result, bool))
 def _get_bool(config: dict[str, Any], key: str) -> bool | None:
     """
@@ -132,7 +149,7 @@ def _get_bool(config: dict[str, Any], key: str) -> bool | None:
     return None


-@pre(lambda config, key: len(key) > 0)
+@pre(lambda config, key: isinstance(config, dict) and len(key) > 0)
 @post(lambda result: result is None or isinstance(result, int))
 def _get_int(config: dict[str, Any], key: str) -> int | None:
     """
@@ -149,7 +166,7 @@ def _get_int(config: dict[str, Any], key: str) -> int | None:
     return None


-@pre(lambda config, key: len(key) > 0)
+@pre(lambda config, key: isinstance(config, dict) and len(key) > 0)
 @post(lambda result: result is None or isinstance(result, float))
 def _get_float(config: dict[str, Any], key: str) -> float | None:
     """
@@ -168,7 +185,7 @@ def _get_float(config: dict[str, Any], key: str) -> float | None:
     return None


-@pre(lambda config, key: len(key) > 0)
+@pre(lambda config, key: isinstance(config, dict) and len(key) > 0)
 @post(lambda result: result is None or isinstance(result, list))
 def _get_str_list(config: dict[str, Any], key: str) -> list[str] | None:
     """
@@ -204,7 +221,9 @@ def _parse_rule_exclusions(config: dict[str, Any]) -> list[RuleExclusion] | None
         if isinstance(excl, dict) and "pattern" in excl and "rules" in excl:
             pattern, rules = excl["pattern"], excl["rules"]
             if isinstance(pattern, str) and isinstance(rules, list):
-                exclusions.append(RuleExclusion(pattern=str(pattern), rules=[str(r) for r in rules]))
+                exclusions.append(
+                    RuleExclusion(pattern=str(pattern), rules=[str(r) for r in rules])
+                )
     return exclusions if exclusions else None


@@ -253,7 +272,14 @@ def parse_guard_config(guard_config: dict[str, Any]) -> RuleConfig:
     kwargs: dict[str, Any] = {}

     # Int fields
-    for key in ("max_file_lines", "max_function_lines"):
+    for key in (
+        "max_file_lines",
+        "max_function_lines",
+        "timeout_doctest",
+        "timeout_hypothesis",
+        "timeout_crosshair",
+        "timeout_crosshair_per_condition",
+    ):
         if (val := _get_int(guard_config, key)) is not None:
             kwargs[key] = val

@@ -286,7 +312,7 @@ def parse_guard_config(guard_config: dict[str, Any]) -> RuleConfig:
     return RuleConfig()


-@pre(lambda file_path, patterns: len(file_path) > 0)
+@pre(lambda file_path, patterns: len(file_path) > 0 and isinstance(patterns, list))
 def matches_pattern(file_path: str, patterns: list[str]) -> bool:
     """
     Check if a file path matches any of the glob patterns.
@@ -316,7 +342,7 @@ def matches_pattern(file_path: str, patterns: list[str]) -> bool:
     return False


-@pre(lambda file_path, prefixes: len(file_path) > 0)
+@pre(lambda file_path, prefixes: len(file_path) > 0 and isinstance(prefixes, list))
 def matches_path_prefix(file_path: str, prefixes: list[str]) -> bool:
     """
     Check if file_path starts with any of the given prefixes.
@@ -386,7 +412,7 @@ def match_glob_pattern(file_path: str, pattern: str) -> bool:
     return False


-@pre(lambda file_path, config: len(file_path) > 0)
+@pre(lambda file_path, config: len(file_path) > 0 and isinstance(config, RuleConfig))
 def get_excluded_rules(file_path: str, config: RuleConfig) -> set[str]:
     """
     Get the set of rules to exclude for a given file path.
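The common thread through utils.py, and most of the files in this release, is `deal`-style contracts whose `@pre` lambdas now also type-check their arguments instead of assuming well-typed input. A minimal standalone example of that pattern, using an invented `get_bool` function and config values purely for illustration:

# Minimal standalone example of the contract style used throughout this release;
# get_bool and the config values are invented for illustration.
from deal import post, pre

@pre(lambda config, key: isinstance(config, dict) and len(key) > 0)
@post(lambda result: result is None or isinstance(result, bool))
def get_bool(config: dict, key: str) -> bool | None:
    """Return config[key] only when it is already a bool."""
    value = config.get(key)
    return value if isinstance(value, bool) else None

# get_bool({"strict": True}, "strict")  -> True
# get_bool({"strict": "yes"}, "strict") -> None (non-bool values are ignored)
# get_bool("not a dict", "strict") raises deal's PreContractError when contracts are enabled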