valediction-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. valediction/__init__.py +8 -0
  2. valediction/convenience.py +50 -0
  3. valediction/data_types/__init__.py +0 -0
  4. valediction/data_types/data_type_helpers.py +75 -0
  5. valediction/data_types/data_types.py +58 -0
  6. valediction/data_types/type_inference.py +541 -0
  7. valediction/datasets/__init__.py +0 -0
  8. valediction/datasets/datasets.py +870 -0
  9. valediction/datasets/datasets_helpers.py +46 -0
  10. valediction/demo/DEMO - Data Dictionary.xlsx +0 -0
  11. valediction/demo/DEMOGRAPHICS.csv +101 -0
  12. valediction/demo/DIAGNOSES.csv +650 -0
  13. valediction/demo/LAB_TESTS.csv +1001 -0
  14. valediction/demo/VITALS.csv +1001 -0
  15. valediction/demo/__init__.py +6 -0
  16. valediction/demo/demo_dictionary.py +129 -0
  17. valediction/dictionary/__init__.py +0 -0
  18. valediction/dictionary/exporting.py +501 -0
  19. valediction/dictionary/exporting_helpers.py +371 -0
  20. valediction/dictionary/generation.py +357 -0
  21. valediction/dictionary/helpers.py +174 -0
  22. valediction/dictionary/importing.py +494 -0
  23. valediction/dictionary/integrity.py +37 -0
  24. valediction/dictionary/model.py +582 -0
  25. valediction/dictionary/template/PROJECT - Data Dictionary.xltx +0 -0
  26. valediction/exceptions.py +22 -0
  27. valediction/integrity.py +97 -0
  28. valediction/io/__init__.py +0 -0
  29. valediction/io/csv_readers.py +307 -0
  30. valediction/progress.py +206 -0
  31. valediction/support.py +72 -0
  32. valediction/validation/__init__.py +0 -0
  33. valediction/validation/helpers.py +315 -0
  34. valediction/validation/issues.py +280 -0
  35. valediction/validation/validation.py +598 -0
  36. valediction-1.0.0.dist-info/METADATA +15 -0
  37. valediction-1.0.0.dist-info/RECORD +38 -0
  38. valediction-1.0.0.dist-info/WHEEL +4 -0
valediction/validation/helpers.py
@@ -0,0 +1,315 @@
+ from __future__ import annotations
+
+ import re
+ from typing import List
+
+ from numpy import flatnonzero, round
+ from pandas import NA, DataFrame, Series, to_datetime, to_numeric
+ from pandas.util import hash_pandas_object
+
+ from valediction.data_types.data_types import DataType
+ from valediction.dictionary.model import Table
+ from valediction.integrity import get_config
+ from valediction.validation.issues import Range
+
+
+ # Remove Nulls
+ def _set_nulls(df: DataFrame) -> DataFrame:
+     null_values = get_config().null_values
+     token_set = {str(t).strip().casefold() for t in null_values}
+     columns = df.select_dtypes(include=["string", "object"]).columns
+     for column in columns:
+         series = df[column]
+         mask = series.notna() & series.str.casefold().isin(token_set)
+         df[column] = series.mask(mask, NA)
+
+     return df
+
+
+ # Check for Nulls
+ def _column_has_values(column: Series):
+     return column.notna().any()
+
+
+ # Range Setting
+ def mask_to_ranges(mask: Series, start_row: int) -> list[Range]:
+     """Convert a boolean mask (over the current chunk) into 0-based contiguous
+     ranges."""
+     idx = flatnonzero(mask.to_numpy())
+     if idx.size == 0:
+         return []
+     ranges: List[Range] = []
+     run_start = idx[0]
+     prev = idx[0]
+     for i in idx[1:]:
+         if i == prev + 1:
+             prev = i
+             continue
+         ranges.append(Range(start=start_row + run_start, end=start_row + prev))
+         run_start = prev = i
+     ranges.append(Range(start=start_row + run_start, end=start_row + prev))
+     return ranges
+
+
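A minimal sketch of how mask_to_ranges collapses a boolean mask into Range objects (illustrative only, not part of the package):

from pandas import Series

mask = Series([False, True, True, False, True])
mask_to_ranges(mask, start_row=100)
# -> [Range(start=101, end=102), Range(start=104, end=104)]
# rows 1-2 and row 4 of the chunk are flagged; start_row shifts them to file coordinates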
+ # PK Hashes
+ def create_pk_hashes(
+     df_primaries: DataFrame,
+ ) -> Series:
+     """For PK hash collision assessment, compute a deterministic 128-bit hash per row
+     over the provided PK columns. The hash combines two 64-bit hashes: one over the
+     PK columns in order and one over them reversed (or salted, for a single column).
+     Rows with any NA across PK components are returned as None - flagging these for
+     NULL violations.
+
+     Args:
+         df_primaries (DataFrame): DataFrame containing only the primary key columns.
+
+     Returns:
+         Series: Pandas Series with hashes or Nulls.
+     """
+     hash_col_name = "PK_HASH"
+     if df_primaries.empty or df_primaries.shape[1] == 0:
+         return Series([], dtype=object, name=hash_col_name)
+
+     # Any NA in row => invalid PK -> None
+     null_rows = df_primaries.isna().any(axis=1)
+
+     # First Hash
+     hash_1 = hash_pandas_object(df_primaries, index=False)  # uint64
+
+     # Second Hash (columns reversed if multiple PK columns, else salt the single column)
+     if df_primaries.shape[1] > 1:
+         df_primaries_backwards = df_primaries.iloc[:, ::-1]
+     else:
+         s = df_primaries.iloc[:, 0]
+         salt = Series(["§"] * len(s), index=s.index, dtype="string")
+         df_primaries_backwards = DataFrame(
+             {
+                 "_a": s,
+                 "_b": s.str.cat(salt),
+             }
+         )
+
+     hash_2 = hash_pandas_object(df_primaries_backwards, index=False)  # uint64
+
+     a1 = hash_1.to_numpy(dtype="uint64", copy=False).astype(object)
+     a2 = hash_2.to_numpy(dtype="uint64", copy=False).astype(object)
+
+     combined = (a1 << 64) | a2
+     hashes = Series(
+         combined, index=df_primaries.index, name=hash_col_name, dtype=object
+     )
+     hashes[null_rows] = None
+     return hashes
+
+
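A rough usage sketch for the hash (column names here are hypothetical; the exact hash values are not meaningful to a reader):

from pandas import DataFrame

pk = DataFrame({"PATIENT_ID": ["P1", "P1", None], "VISIT": ["A", "B", "C"]})
hashes = create_pk_hashes(pk)
# hashes[0] and hashes[1] are distinct 128-bit Python ints; hashes[2] is None because
# one PK component is missing, so that row is left for the PK_NULL check instead.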
+ def compute_pk_masks(pk_hashes: Series, seen_hashes: set[int]) -> dict[str, Series]:
+     """Compute masks for PK hashes that are either null or have been seen before.
+
+     Args:
+         pk_hashes (Series): Series of PK hashes.
+         seen_hashes (set[int]): Set of hashes that have been seen before.
+
+     Returns:
+         dict[str, Series]: Dictionary of boolean masks:
+             - null: rows where the PK is None / NA
+             - in_chunk_collision: rows that are part of a within-chunk duplicate group
+             - cross_chunk_collision: rows whose hash was seen in previous chunks
+               (excluding within-chunk duplicates)
+             - first_appearance: rows that are the first occurrence of a hash
+     """
+
+     s = pk_hashes
+     null = s.isna()
+     valid = ~null
+     if not valid.any():
+         # empty/default masks
+         return {
+             "null": null,
+             "in_chunk_collision": null,
+             "cross_chunk_collision": null,
+             "first_appearance": null,
+         }
+
+     s_valid = s[valid]
+
+     # Within-chunk duplicate membership (mark *all* members)
+     dup_local = s_valid.duplicated(keep=False)
+
+     # Across-chunk duplicates (exclude those already in a local dup group)
+     seen_local = s_valid.isin(seen_hashes)
+     cross_local = seen_local & ~dup_local
+
+     # New first occurrences in this chunk (first time we see the hash here, and not seen before)
+     first_local = ~s_valid.duplicated(keep="first")
+     new_first_local = first_local & ~seen_local
+
+     # Lift back to full length masks
+     in_chunk_collision = valid.copy()
+     in_chunk_collision.loc[valid] = dup_local
+
+     cross_chunk_collision = valid.copy()
+     cross_chunk_collision.loc[valid] = cross_local
+
+     first_appearance = valid.copy()
+     first_appearance.loc[valid] = new_first_local
+
+     return {
+         "null": null,
+         "in_chunk_collision": in_chunk_collision,
+         "cross_chunk_collision": cross_chunk_collision,
+         "first_appearance": first_appearance,
+     }
+
+
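How the two helpers appear intended to combine across chunks; the chunk loop and pk_columns below are assumptions, not code from the package:

seen_hashes: set[int] = set()
for chunk in chunks:                      # hypothetical iterable of DataFrames
    hashes = create_pk_hashes(chunk[pk_columns])
    masks = compute_pk_masks(hashes, seen_hashes)
    # in_chunk_collision / cross_chunk_collision would feed PK_COLLISION issues
    seen_hashes.update(hashes[masks["first_appearance"]].tolist())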
+ # PK Whitespace
+ def pk_contains_whitespace_mask(df_primaries: DataFrame) -> Series:
+     if df_primaries.empty or df_primaries.shape[1] == 0:
+         return Series(False, index=df_primaries.index)
+
+     col_masks = df_primaries.apply(lambda s: s.str.contains(r"\s", na=False))
+
+     return col_masks.any(axis=1)
+
+
+ # Data Type Checks Numeric
+ def invalid_mask_integer(column: Series, *, tolerance: float = 1e-12) -> Series:
+     """True where a non-null value cannot be treated as an integer without a
+     non-zero remainder.
+
+     Accepts scientific notation (e.g. '1e2').
+     """
+     notnull = column.notna()
+     numeric = to_numeric(column, errors="coerce")
+     invalid = notnull & numeric.isna()
+
+     conversion_mask = notnull & numeric.notna()
+     if conversion_mask.any():
+         vals = numeric[conversion_mask].astype("float64")
+         frac = (vals - round(vals)).abs()
+         invalid_conv = frac > tolerance
+         invalid = invalid.copy()
+         invalid.loc[conversion_mask] = invalid_conv.values
+     return invalid
+
+
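To make the integer rule concrete (illustrative only):

from pandas import Series

invalid_mask_integer(Series(["12", "12.0", "1e2", "12.5", "abc", None]))
# -> [False, False, False, True, True, False]
# '12.5' keeps a non-zero remainder, 'abc' fails numeric coercion, nulls are ignored.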
+ def invalid_mask_float(column: Series) -> Series:
+     """True where non-null value is not convertible to a number."""
+     notnull = column.notna()
+     num = to_numeric(column, errors="coerce")
+     return notnull & num.isna()
+
+
+ # Data Type Checks Date
+ def _allowed_formats_for(dtype: DataType) -> list[str]:
+     """Return the list of formats from Config.date_formats allowed for the given
+     DataType."""
+     config = get_config()
+     return [fmt for fmt, data_type in config.date_formats.items() if data_type == dtype]
+
+
+ def _parse_ok_any(column: Series, formats: list[str]) -> Series:
+     """
+     Vectorised check: True for values that parse under at least one of `formats`.
+     """
+     if not formats:
+         return Series(False, index=column.index)
+     ok_any = Series(False, index=column.index)
+     for fmt in formats:
+         parsed = to_datetime(column, format=fmt, errors="coerce", utc=False)
+         ok_any = ok_any | parsed.notna()
+     return ok_any
+
+
+ def invalid_mask_date(column: Series, fmt: str | None) -> Series:
+     """True where a non-null value does not parse as a date, or parses but carries a
+     non-zero time component."""
+     notnull = column.notna()
+
+     if fmt:
+         parsed = to_datetime(column, format=fmt, errors="coerce", utc=False)
+         ok = parsed.notna()
+         has_time = ok & (
+             (parsed.dt.hour != 0)
+             | (parsed.dt.minute != 0)
+             | (parsed.dt.second != 0)
+             | (parsed.dt.microsecond != 0)
+         )
+         return notnull & (~ok | has_time)
+
+     allowed = _allowed_formats_for(DataType.DATE)
+     ok_any = _parse_ok_any(column, allowed)
+     return notnull & (~ok_any)
+
+
+ def invalid_mask_datetime(column: Series, fmt: str | None) -> Series:
+     notnull = column.notna()
+
+     if fmt:
+         parsed = to_datetime(column, format=fmt, errors="coerce", utc=False)
+         ok = parsed.notna()
+         return notnull & (~ok)
+
+     allowed = _allowed_formats_for(DataType.DATETIME)
+     ok_any = _parse_ok_any(column, allowed)
+     return notnull & (~ok_any)
+
+
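A quick illustration of the date check when an explicit format is supplied (illustrative only):

from pandas import Series

invalid_mask_date(Series(["2024-01-31 00:00:00", "2024-01-31 08:15:00", "not a date"]),
                  fmt="%Y-%m-%d %H:%M:%S")
# -> [False, True, True]
# a midnight timestamp still counts as a date; 08:15:00 has a time component;
# the last value fails to parse at all.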
+ # Other Text Checks
+ def invalid_mask_text_too_long(column: Series, max_len: int) -> Series:
+     if max_len is None or max_len <= 0:
+         # treat as unlimited length
+         return Series(False, index=column.index)
+
+     notnull = column.notna()
+     lens = column.str.len()
+     return notnull & (lens > max_len)
+
+
+ def invalid_mask_text_forbidden_characters(column: Series) -> Series:
+     forbidden = get_config().forbidden_characters
+     if not forbidden:
+         return Series(False, index=column.index)
+
+     pattern = "[" + re.escape("".join(forbidden)) + "]"
+     notnull = column.notna()
+     has_forbidden = column.str.contains(pattern, regex=True, na=False)
+     return notnull & has_forbidden
+
+
+ # Apply Data Types #
+ def apply_data_types(df: DataFrame, table_dictionary: Table) -> DataFrame:
+     # name -> column object
+     column_dictionary = {column.name: column for column in table_dictionary}
+
+     for col in df.columns:
+         data_type = column_dictionary.get(col).data_type
+         datetime_format = column_dictionary.get(col).datetime_format
+
+         if data_type in (DataType.TEXT, DataType.FILE):
+             df[col] = df[col].astype("string")
+
+         elif data_type == DataType.INTEGER:
+             # Accepts '12', '12.0', '1e2' etc.; validation guarantees integer-equivalent
+             nums = to_numeric(df[col], errors="raise")
+             df[col] = nums.round().astype("Int64")
+
+         elif data_type == DataType.FLOAT:
+             nums = to_numeric(df[col], errors="raise")
+             df[col] = nums.astype("Float64")
+
+         elif data_type == DataType.DATE:
+             dtv = to_datetime(
+                 df[col], format=datetime_format, errors="raise", utc=False
+             )
+             df[col] = dtv.dt.normalize()  # midnight
+
+         elif data_type == DataType.DATETIME:
+             df[col] = to_datetime(
+                 df[col], format=datetime_format, errors="raise", utc=False
+             )
+
+         else:
+             # Fallback: keep as string
+             df[col] = df[col].astype("string")
+
+     return df
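A sketch of how apply_data_types would be called once validation has passed. The column names are hypothetical and the Table construction is not shown; it is assumed to come from the package's dictionary model:

from pandas import DataFrame

raw = DataFrame({"AGE": ["41", "7.0"], "VISIT_DATE": ["2024-01-31", "2024-02-01"]})
# table_dictionary: a valediction.dictionary.model.Table describing AGE as INTEGER
# and VISIT_DATE as DATE (construction not shown here)
typed = apply_data_types(raw, table_dictionary)
# typed["AGE"] becomes nullable Int64 ([41, 7]); typed["VISIT_DATE"] becomes
# datetime values normalised to midnight.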
valediction/validation/issues.py
@@ -0,0 +1,280 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from typing import Iterable, Iterator, Optional
+
+ from pandas import DataFrame, concat
+
+ from valediction.datasets.datasets_helpers import DatasetItemLike
+ from valediction.io.csv_readers import CsvReadConfig, read_csv_ranges
+ from valediction.support import _normalise_name, list_as_bullets
+
+
+ class IssueType(Enum):
+     # Column / schema
+     MISSING_COLUMN = "MissingColumn"
+     EXTRA_COLUMN = "ExtraColumn"
+     FULLY_NULL_COLUMN = "FullyNullColumn"
+
+     # Keys
+     PK_NULL = "PrimaryKeyNull"
+     PK_COLLISION = "PrimaryKeyCollision"
+     PK_WHITESPACE = "PrimaryKeyContainsWhitespace"
+
+     # Types / content
+     TYPE_MISMATCH = "TypeMismatch"
+     TEXT_TOO_LONG = "TextTooLong"
+     FORBIDDEN_CHARACTER = "ForbiddenCharacter"
+
+
+ # Settings
+ APPLIES_WHOLE_COLUMN = {
+     IssueType.MISSING_COLUMN,
+     IssueType.EXTRA_COLUMN,
+     IssueType.FULLY_NULL_COLUMN,
+ }
+
+ PRIMARY_KEY_ISSUES = {
+     IssueType.PK_NULL,
+     IssueType.PK_COLLISION,
+     IssueType.PK_WHITESPACE,
+ }
+
+
+ @dataclass
+ class Range:
+     start: int
+     end: int
+
+     # Explicit __init__ (kept by @dataclass) so start/end are coerced to built-in int
+     def __init__(self, start: int, end: int):
+         self.start: int = int(start)
+         self.end: int = int(end)
+
+
+ @dataclass
+ class Issue:
+     """
+     Summary:
+         Dataclass representing an issue in the dataset.
+
+     Attributes:
+         type (IssueType): type of issue
+         table (str): name of the table where the issue was detected
+         column (str | None): name of the column where the issue was detected, or None if not applicable
+         ranges (list[Range]): list of contiguous ranges of rows where the issue was detected
+         parent (DatasetItemLike | None): parent dataset item, or None if not applicable
+     """
+
+     type: IssueType
+     table: str
+     column: str | None
+     ranges: list[Range] = field(default_factory=list)
+     parent: DatasetItemLike | None = None
+
+     # Magic
+     def __repr__(self) -> str:
+         column_part = f", column={self.column!r}" if self.column is not None else ""
+         sum_ranges = sum(r.end - r.start + 1 for r in self.ranges)
+         sum_range_part = f", total={sum_ranges}" if sum_ranges else ""
+         return f"Issue(type={self.type.value!r}, table={self.table!r}{column_part}{sum_range_part})"
+
+     # Methods
+     def add_ranges(self, new_ranges: Iterable[Range]) -> None:
+         """
+         Summary:
+             Merge new contiguous/overlapping ranges into self.ranges (kept sorted).
+
+         Arguments:
+             new_ranges (Iterable[Range]): new contiguous/overlapping ranges to be merged into self.ranges
+         """
+         all_ranges = self.ranges + list(new_ranges)
+         if not all_ranges:
+             self.ranges = []
+             return
+         all_ranges.sort(key=lambda r: (r.start, r.end))
+         merged: list[Range] = []
+         cur = all_ranges[0]
+         for r in all_ranges[1:]:
+             if r.start <= cur.end + 1:  # contiguous/overlap
+                 cur.end = max(cur.end, r.end)
+             else:
+                 merged.append(cur)
+                 cur = r
+         merged.append(cur)
+         self.ranges = merged
+
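A small illustration of the range merging (table and column names are hypothetical):

issue = Issue(type=IssueType.TYPE_MISMATCH, table="VITALS", column="HEIGHT")
issue.add_ranges([Range(5, 9), Range(10, 12)])   # adjacent runs merge
issue.add_ranges([Range(2, 6), Range(40, 41)])   # overlaps merge, gaps stay separate
issue.ranges
# -> [Range(start=2, end=12), Range(start=40, end=41)]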
+     def inspect(
+         self,
+         additional_columns: bool | str | list[str] | None = None,
+         chunk_size: int = 1_000_000,
+         print_header: bool = True,
+     ) -> DataFrame | None:
+         """
+         Summary:
+             Inspect an issue in the dataset by returning a DataFrame containing the relevant values.
+
+         Arguments:
+             additional_columns (bool | str | list[str] | None): whether to include additional columns in the DataFrame
+                 - if True, include all columns
+                 - if str or list[str], include only the specified columns
+                 - if None, do not include any additional columns
+             chunk_size (int): the number of rows to include in the DataFrame at a time
+             print_header (bool): whether to print the issue details as a header
+
+         Returns:
+             DataFrame | None: a DataFrame containing the relevant rows of the dataset,
+                 or None for issues that apply to a whole column
+
+         Raises:
+             ValueError: if the issue has no parent DatasetItem
+         """
+         # Guard
+         if not self.parent:
+             raise ValueError("Issue has no parent DatasetItem")
+         header = self.__repr__() if print_header else ""
+         # Not applicable
+         if self.type in APPLIES_WHOLE_COLUMN:
+             print(f"{header}: applies to whole column")
+             return None
+
+         # Column Inclusion
+         if print_header:
+             print(f"{header}:")
+         if additional_columns is True:
+             columns = None
+         else:
+             additional_columns = (
+                 [additional_columns]
+                 if isinstance(additional_columns, str)
+                 else additional_columns
+             )
+             base = (
+                 set(self.parent.primary_keys)
+                 if self.type in PRIMARY_KEY_ISSUES
+                 else {self.column}
+             )
+             base |= set(additional_columns or [])
+             base.discard(None)
+             columns = list(base) if base else None
+
+         if not self.ranges:
+             return DataFrame(columns=columns) if columns else DataFrame()
+
+         spans: list[tuple[int, int]] = [(r.start, r.end) for r in self.ranges]
+
+         # DataFrame source: slice directly
+         if self.parent.is_dataframe:
+             df: DataFrame = self.parent.data
+             n = len(df)
+             if n == 0:
+                 return DataFrame(columns=columns) if columns else DataFrame()
+
+             # Clamp spans to df length; build parts
+             parts: list[DataFrame] = []
+             for s, e in spans:
+                 if s > e or s >= n or e < 0:
+                     continue
+                 lo = max(0, s)
+                 hi = min(n - 1, e)
+                 part: DataFrame = df.iloc[lo : hi + 1]
+                 parts.append(part if columns is None else part.loc[:, columns])
+
+             if not parts:
+                 return DataFrame(columns=columns) if columns else DataFrame()
+             return concat(parts, axis=0, ignore_index=False)
+
+         # CSV source: delegate reading to csv_readers
+         if self.parent.is_path:
+             path = self.parent.data
+             cfg = CsvReadConfig(usecols=columns)
+             out = read_csv_ranges(path, spans, cfg=cfg, chunk_size=chunk_size)
+
+             return out if columns is None else out.loc[:, columns]
+
+
+ @dataclass
+ class Issues:
+     """List-like container holding Issues with case-insensitive get and range
+     merging."""
+
+     # Magic
+     def __init__(self) -> None:
+         self._items: list[Issue] = []
+         self._index: dict[
+             tuple[str, Optional[str], IssueType], Issue
+         ] = {}  # table, column, issue_type
+
+     def __iter__(self) -> Iterator[Issue]:
+         return iter(self._items)
+
+     def __len__(self) -> int:
+         return len(self._items)
+
+     def __bool__(self) -> bool:
+         return bool(self._items)
+
+     def __getitem__(self, idx) -> Issue | list[Issue]:
+         return self._items[idx]
+
+     def __repr__(self) -> str:
+         if not self._items:
+             return "Issues([])"
+         issues = list_as_bullets(elements=[repr(item) for item in self._items])
+         return f"Issues({issues}\n)"
+
+     # Methods
+     def add(
+         self,
+         issue_type: IssueType,
+         table: str,
+         column: str | None = None,
+         ranges: Iterable[Range] | None = None,
+         parent: DatasetItemLike | None = None,
+     ) -> Issue:
+         key = (
+             _normalise_name(table),
+             _normalise_name(column) if column is not None else None,
+             issue_type,
+         )
+         issue = self._index.get(key)
+         if issue is None:
+             issue = Issue(type=issue_type, table=table, column=column, parent=parent)
+             self._items.append(issue)
+             self._index[key] = issue
+         if ranges:
+             issue.add_ranges(ranges)
+         return issue
+
+     def get(
+         self,
+         table: str,
+         column: str | None = None,
+         issue_type: IssueType | None = None,
+     ) -> list[Issue]:
+         """Case-insensitive filter; column and issue_type may be None to act as wildcards."""
+         table = _normalise_name(table)
+         column = _normalise_name(column) if column is not None else None
+         output: list[Issue] = []
+         if issue_type is not None and column is not None:
+             # direct index lookup when the key is fully specified
+             key = (table, column, issue_type)
+             hit = self._index.get(key)
+             if hit:
+                 output.append(hit)
+             return output
+
+         # otherwise scan (still cheap; we maintain a compact list)
+         for item in self._items:
+             if _normalise_name(item.table) != table:
+                 continue
+             if column is not None and (_normalise_name(item.column) or "") != column:
+                 continue
+             if issue_type is not None and item.type != issue_type:
+                 continue
+             output.append(item)
+         return output
+
+     def extend(self, issues: Issues) -> None:
+         for issue in issues:
+             self.add(issue.type, issue.table, issue.column, issue.ranges, issue.parent)
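Finally, a brief sketch of the container behaviour, assuming _normalise_name folds case as the docstrings imply (table and column names are hypothetical):

issues = Issues()
issues.add(IssueType.TYPE_MISMATCH, table="Vitals", column="Height", ranges=[Range(5, 9)])
issues.add(IssueType.TYPE_MISMATCH, table="VITALS", column="HEIGHT", ranges=[Range(10, 12)])
len(issues)                                         # 1 - same normalised key, ranges merged
issues.get("vitals", issue_type=IssueType.TYPE_MISMATCH)
# -> [Issue(type='TypeMismatch', table='Vitals', column='Height', total=8)]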