rost-io 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rost_io-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,111 @@
1
+ Metadata-Version: 2.4
2
+ Name: rost-io
3
+ Version: 0.1.0
4
+ Summary: Adapter layer for .rost — converts any data source to canonical JSON
5
+ License: MIT
6
+ Requires-Python: >=3.10
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: jsonschema>=4.0
9
+ Provides-Extra: csv
10
+ Provides-Extra: parquet
11
+ Requires-Dist: pyarrow>=13.0; extra == "parquet"
12
+ Provides-Extra: db
13
+ Requires-Dist: sqlalchemy>=2.0; extra == "db"
14
+ Provides-Extra: excel
15
+ Requires-Dist: openpyxl>=3.1; extra == "excel"
16
+ Provides-Extra: pandas
17
+ Requires-Dist: pandas>=2.0; extra == "pandas"
18
+ Provides-Extra: llm
19
+ Requires-Dist: instructor>=1.0; extra == "llm"
20
+ Requires-Dist: openai>=1.0; extra == "llm"
21
+ Provides-Extra: all
22
+ Requires-Dist: pyarrow>=13.0; extra == "all"
23
+ Requires-Dist: sqlalchemy>=2.0; extra == "all"
24
+ Requires-Dist: openpyxl>=3.1; extra == "all"
25
+ Requires-Dist: pandas>=2.0; extra == "all"
26
+ Requires-Dist: instructor>=1.0; extra == "all"
27
+ Requires-Dist: openai>=1.0; extra == "all"
28
+
29
+ # rost-io — Adapter layer for `.rost`
30
+
31
+ `rost-io` converts messy real-world data sources into the canonical JSON
32
+ contracts that the `.rost` compiler and solver consume.
33
+
34
+ ## Design principle
35
+
36
+ The `.rost` compiler only reads canonical JSON. It has no database drivers,
37
+ no Excel parser, no API calls. `rost-io` is the adapter layer that handles
38
+ all of that — exactly as described in `DATA_IO.md` (Hexagonal Architecture).
39
+
40
+ ```
41
+ Excel / CSV / Parquet / PostgreSQL / MySQL / PDF
42
+                         │
43
+                         ▼
44
+ rost-io adapter ← this package
45
+
46
+ │ staff.json (schema: rost/staff/v1)
47
+ │ leave.json (schema: rost/leave/v1)
48
+ │ calendar.json (schema: rost/calendar/v1)
49
+
50
+ rostc compiler → solver → solution.json
51
+ ```
52
+
53
+ ## Installation
54
+
55
+ ```bash
56
+ # Core only (CSV + JSON — no extra dependencies)
57
+ pip install rost-io
58
+
59
+ # With Parquet support (P1)
60
+ pip install "rost-io[parquet]"
61
+
62
+ # With database support — PostgreSQL + MySQL (P1)
63
+ pip install "rost-io[db]"
64
+
65
+ # With Excel support (P2)
66
+ pip install "rost-io[excel]"
67
+
68
+ # With pandas support (P2)
69
+ pip install "rost-io[pandas]"
70
+
71
+ # Everything
72
+ pip install "rost-io[all]"
73
+ ```
74
+
75
+ ## Quick start
76
+
77
+ ```python
78
+ from rost_io import CsvAdapter, validate_staff
79
+
80
+ # Convert a CSV staff export to canonical staff.json
81
+ adapter = CsvAdapter("hr_export.csv", id_col="employee_id", tags_col="roles")
82
+ staff_json = adapter.to_staff_json()
83
+
84
+ # Validate against the canonical schema
85
+ validate_staff(staff_json) # raises jsonschema.ValidationError if invalid
86
+
87
+ # Write for the compiler
88
+ import json
89
+ with open("staff.json", "w") as f:
90
+ json.dump(staff_json, f, indent=2)
91
+ ```
92
+
93
+ ## Canonical JSON schemas
94
+
95
+ | File | Schema ID | Description |
96
+ |------------------|--------------------|-------------------------------------|
97
+ | `staff.json` | `rost/staff/v1` | People + tags |
98
+ | `leave.json` | `rost/leave/v1` | Leave/absence entries |
99
+ | `calendar.json` | `rost/calendar/v1` | Date range + public holidays |
100
+ | `solution.json` | `rost/solution/v1` | Solver output (read-only for rost-io)|
101
+
102
+ ## Adapters
103
+
104
+ | Adapter | Priority | Extra dep |
105
+ |--------------------|----------|---------------------|
106
+ | `CsvAdapter` | P0 | none (stdlib) |
107
+ | `JsonAdapter` | P0 | none |
108
+ | `ParquetAdapter` | P1 | `pyarrow` |
109
+ | `DatabaseAdapter` | P1 | `sqlalchemy` |
110
+ | `ExcelAdapter` | P2 | `openpyxl` |
111
+ | `PandasAdapter` | P2 | `pandas` |
@@ -0,0 +1,83 @@
1
+ # rost-io — Adapter layer for `.rost`
2
+
3
+ `rost-io` converts messy real-world data sources into the canonical JSON
4
+ contracts that the `.rost` compiler and solver consume.
5
+
6
+ ## Design principle
7
+
8
+ The `.rost` compiler only reads canonical JSON. It has no database drivers,
9
+ no Excel parser, no API calls. `rost-io` is the adapter layer that handles
10
+ all of that — exactly as described in `DATA_IO.md` (Hexagonal Architecture).
11
+
12
+ ```
13
+ Excel / CSV / Parquet / PostgreSQL / MySQL / PDF
14
+                         │
15
+                         ▼
16
+ rost-io adapter ← this package
17
+
18
+ │ staff.json (schema: rost/staff/v1)
19
+ │ leave.json (schema: rost/leave/v1)
20
+ │ calendar.json (schema: rost/calendar/v1)
21
+
22
+ rostc compiler → solver → solution.json
23
+ ```
24
+
25
+ ## Installation
26
+
27
+ ```bash
28
+ # Core only (CSV + JSON — no extra dependencies)
29
+ pip install rost-io
30
+
31
+ # With Parquet support (P1)
32
+ pip install "rost-io[parquet]"
33
+
34
+ # With database support — PostgreSQL + MySQL (P1)
35
+ pip install "rost-io[db]"
36
+
37
+ # With Excel support (P2)
38
+ pip install "rost-io[excel]"
39
+
40
+ # With pandas support (P2)
41
+ pip install "rost-io[pandas]"
42
+
43
+ # Everything
44
+ pip install "rost-io[all]"
45
+ ```
46
+
47
+ ## Quick start
48
+
49
+ ```python
50
+ from rost_io import CsvAdapter, validate_staff
51
+
52
+ # Convert a CSV staff export to canonical staff.json
53
+ adapter = CsvAdapter("hr_export.csv", id_col="employee_id", tags_col="roles")
54
+ staff_json = adapter.to_staff_json()
55
+
56
+ # Validate against the canonical schema
57
+ validate_staff(staff_json) # raises jsonschema.ValidationError if invalid
58
+
59
+ # Write for the compiler
60
+ import json
61
+ with open("staff.json", "w") as f:
62
+ json.dump(staff_json, f, indent=2)
63
+ ```
64
+
65
+ ## Canonical JSON schemas
66
+
67
+ | File | Schema ID | Description |
68
+ |------------------|--------------------|-------------------------------------|
69
+ | `staff.json` | `rost/staff/v1` | People + tags |
70
+ | `leave.json` | `rost/leave/v1` | Leave/absence entries |
71
+ | `calendar.json` | `rost/calendar/v1` | Date range + public holidays |
72
+ | `solution.json` | `rost/solution/v1` | Solver output (read-only for rost-io)|
73
+
74
+ ## Adapters
75
+
76
+ | Adapter | Priority | Extra dep |
77
+ |--------------------|----------|---------------------|
78
+ | `CsvAdapter` | P0 | none (stdlib) |
79
+ | `JsonAdapter` | P0 | none |
80
+ | `ParquetAdapter` | P1 | `pyarrow` |
81
+ | `DatabaseAdapter` | P1 | `sqlalchemy` |
82
+ | `ExcelAdapter` | P2 | `openpyxl` |
83
+ | `PandasAdapter` | P2 | `pandas` |
@@ -0,0 +1,35 @@
1
+ [build-system]
2
+ requires = ["setuptools>=67", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "rost-io"
7
+ version = "0.1.0"
8
+ description = "Adapter layer for .rost — converts any data source to canonical JSON"
9
+ readme = "README.md"
10
+ requires-python = ">=3.10"
11
+ license = { text = "MIT" }
12
+
13
+ dependencies = [
14
+ "jsonschema>=4.0",
15
+ ]
16
+
17
+ [project.optional-dependencies]
18
+ csv = [] # stdlib csv — no extra dep
19
+ parquet = ["pyarrow>=13.0"]
20
+ db = ["sqlalchemy>=2.0"]
21
+ excel = ["openpyxl>=3.1"]
22
+ pandas = ["pandas>=2.0"]
23
+ llm = ["instructor>=1.0", "openai>=1.0"]
24
+ all = [
25
+ "pyarrow>=13.0",
26
+ "sqlalchemy>=2.0",
27
+ "openpyxl>=3.1",
28
+ "pandas>=2.0",
29
+ "instructor>=1.0",
30
+ "openai>=1.0",
31
+ ]
32
+
33
+ [tool.setuptools.packages.find]
34
+ where = ["."]
35
+ include = ["rost_io*"]
@@ -0,0 +1,42 @@
1
+ """
2
+ rost-io — Adapter layer for .rost
3
+ Converts any data source to canonical JSON (rost/staff/v1, rost/leave/v1, rost/calendar/v1).
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ from rost_io.base import RostAdapter
9
+ from rost_io.validation import validate_staff, validate_leave, validate_calendar, validate_solution
10
+ from rost_io.adapters.csv_adapter import CsvAdapter
11
+ from rost_io.adapters.json_adapter import JsonAdapter
12
+
13
+ __all__ = [
14
+ # Base class
15
+ "RostAdapter",
16
+ # Validation helpers
17
+ "validate_staff",
18
+ "validate_leave",
19
+ "validate_calendar",
20
+ "validate_solution",
21
+ # P0 adapters (no extra deps)
22
+ "CsvAdapter",
23
+ "JsonAdapter",
24
+ ]
25
+
26
+ # P1/P2 adapters are imported lazily so missing optional deps don't break the
27
+ # core import. Users should install extras: pip install "rost-io[parquet]" etc.
28
+
29
+ def __getattr__(name: str):
30
+ if name == "ParquetAdapter":
31
+ from rost_io.adapters.parquet_adapter import ParquetAdapter
32
+ return ParquetAdapter
33
+ if name == "DatabaseAdapter":
34
+ from rost_io.adapters.database_adapter import DatabaseAdapter
35
+ return DatabaseAdapter
36
+ if name == "ExcelAdapter":
37
+ from rost_io.adapters.excel_adapter import ExcelAdapter
38
+ return ExcelAdapter
39
+ if name == "PandasAdapter":
40
+ from rost_io.adapters.pandas_adapter import PandasAdapter
41
+ return PandasAdapter
42
+ raise AttributeError(f"module 'rost_io' has no attribute {name!r}")
File without changes
@@ -0,0 +1,234 @@
1
+ """
2
+ CsvAdapter — P0 adapter that reads CSV staff/leave exports.
3
+
4
+ No extra dependencies beyond the Python standard library.
5
+
6
+ Staff CSV format (minimum):
7
+ name,tags
8
+ Alice,trainee
9
+ Bob,ac
10
+
11
+ Staff CSV format (extended):
12
+ employee_id,full_name,role,department
13
+ alice,Alice Wong,trainee,ED
14
+ bob,Bob Smith,ac,ED
15
+
16
+ Leave CSV format (minimum):
17
+ person_id,start,end
18
+ alice,2026-05-10,2026-05-12
19
+
20
+ Leave CSV format (extended):
21
+ person_id,start,end,type,priority,approved
22
+ alice,2026-05-10,2026-05-12,annual,normal,true
23
+ """
24
+
25
+ from __future__ import annotations
26
+
27
+ import csv
28
+ import re
29
+ from pathlib import Path
30
+ from typing import Any
31
+
32
+ from rost_io.base import RostAdapter
33
+
34
+
35
+ def _parse_bool(s: str) -> bool:
36
+ return s.strip().lower() in ("true", "yes", "1", "y")
37
+
38
+
39
+ def _normalise_id(s: str) -> str:
40
+ """Lower-case, strip, replace spaces/special chars with underscores."""
41
+ return re.sub(r"[^a-z0-9_]", "_", s.strip().lower()).strip("_")
42
+
43
+
44
class CsvAdapter(RostAdapter):
    """
    Reads a CSV file and converts it to canonical rost/staff/v1 or
    rost/leave/v1 JSON.

    Args:
        path: Path to the CSV file.
        id_col: Column name for the person identifier.
            Defaults to first of: ``id``, ``employee_id``, ``name``.
        display_name_col: Column name for the display name (optional).
        tags_col: Column name for comma-separated tags (optional).
        extra_tag_cols: Additional columns to include as ``col:value`` tags.
        normalise_ids: Whether to normalise IDs (lower-case, underscore).
            Default True.
        encoding: CSV file encoding. Default ``utf-8``.
    """

    # Ordered list of fallback column names when id_col is not specified
    _ID_FALLBACKS = ("id", "employee_id", "name", "person_id")

    def __init__(
        self,
        path: str | Path,
        *,
        id_col: str | None = None,
        display_name_col: str | None = None,
        tags_col: str | None = None,
        extra_tag_cols: list[str] | None = None,
        normalise_ids: bool = True,
        encoding: str = "utf-8",
    ) -> None:
        self.path = Path(path)
        self.id_col = id_col
        self.display_name_col = display_name_col
        self.tags_col = tags_col
        self.extra_tag_cols = extra_tag_cols or []
        self.normalise_ids = normalise_ids
        self.encoding = encoding

    # ── staff ──────────────────────────────────────────────────────────────────

    def to_staff_json(self) -> dict:
        """
        Convert the CSV to canonical rost/staff/v1 JSON.

        Returns:
            dict with ``"schema": "rost/staff/v1"`` and ``"people"`` list.

        Raises:
            ValueError: if no id column can be resolved (see _resolve_col).
        """
        rows = self._read_csv()
        if not rows:
            return {"schema": "rost/staff/v1", "people": []}

        headers = list(rows[0].keys())
        id_col = self._resolve_col(headers, self.id_col, self._ID_FALLBACKS, "id/name")
        display_col = self._resolve_col(
            headers, self.display_name_col,
            ("display_name", "full_name", "name", "Display Name", "Full Name"),
            required=False,
        )
        tags_col = self._resolve_col(
            headers, self.tags_col, ("tags", "roles", "role", "tag"),
            required=False,
        )
        extra_cols = [c for c in self.extra_tag_cols if c in headers]

        people = []
        for row in rows:
            # csv.DictReader yields None for cells missing from short rows —
            # guard every .strip() with `or ""` so such rows can't crash us.
            raw_id = (row.get(id_col) or "").strip()
            if not raw_id:
                # Rows without an id are silently skipped (blank/padding rows).
                continue
            person_id = _normalise_id(raw_id) if self.normalise_ids else raw_id
            if display_col:
                # Fall back to the raw id when the display cell is empty/missing
                # so we never emit an empty display_name.
                display_name = (row.get(display_col) or "").strip() or raw_id
            else:
                display_name = raw_id

            tags: list[str] = []
            if tags_col:
                tags_str = (row.get(tags_col) or "").strip()
                tags = [t.strip() for t in tags_str.split(",") if t.strip()]
            for col in extra_cols:
                val = (row.get(col) or "").strip()
                if val:
                    tags.append(f"{col}:{val}")

            people.append({
                "id": person_id,
                "display_name": display_name,
                "tags": tags,
                "custom": {},
            })

        return {"schema": "rost/staff/v1", "people": people}

    # ── leave ──────────────────────────────────────────────────────────────────

    def to_leave_json(self) -> dict:
        """
        Convert the CSV to canonical rost/leave/v1 JSON.

        Returns:
            dict with ``"schema": "rost/leave/v1"`` and ``"entries"`` list.

        Raises:
            ValueError: if no person/start column can be resolved.
        """
        rows = self._read_csv()
        if not rows:
            return {"schema": "rost/leave/v1", "entries": []}

        headers = list(rows[0].keys())
        person_col = self._resolve_col(
            headers, None, ("person_id", "person", "employee_id", "name", "id"), "person_id"
        )
        start_col = self._resolve_col(headers, None, ("start", "start_date", "from"), "start")
        end_col = self._resolve_col(
            headers, None, ("end", "end_date", "to"), required=False
        )
        type_col = self._resolve_col(
            headers, None, ("type", "leave_type", "category"), required=False
        )
        priority_col = self._resolve_col(
            headers, None, ("priority",), required=False
        )
        approved_col = self._resolve_col(
            headers, None, ("approved", "is_approved"), required=False
        )

        entries = []
        for row in rows:
            # `or ""` guards: DictReader fills short rows with None values.
            person_id = (row.get(person_col) or "").strip()
            if not person_id:
                continue
            if self.normalise_ids:
                person_id = _normalise_id(person_id)

            start = (row.get(start_col) or "").strip()
            if not start:
                # An entry without a start date is meaningless — skip it.
                continue

            entry: dict[str, Any] = {"person_id": person_id, "start": start}
            # Optional fields are only emitted when non-empty, keeping the
            # canonical JSON minimal.
            if end_col:
                end = (row.get(end_col) or "").strip()
                if end:
                    entry["end"] = end
            if type_col:
                t = (row.get(type_col) or "").strip()
                if t:
                    entry["type"] = t
            if priority_col:
                p = (row.get(priority_col) or "").strip()
                if p:
                    entry["priority"] = p
            if approved_col:
                a = (row.get(approved_col) or "").strip()
                if a:
                    entry["approved"] = _parse_bool(a)

            entries.append(entry)

        return {"schema": "rost/leave/v1", "entries": entries}

    # ── internal ───────────────────────────────────────────────────────────────

    def _read_csv(self) -> list[dict[str, str]]:
        """Read the whole CSV into a list of header-keyed row dicts."""
        with open(self.path, newline="", encoding=self.encoding) as f:
            reader = csv.DictReader(f)
            return list(reader)

    @staticmethod
    def _resolve_col(
        headers: list[str],
        explicit: str | None,
        fallbacks: tuple[str, ...],
        label: str = "",
        *,
        required: bool = True,
    ) -> str | None:
        """Resolve a column name: explicit wins (and must exist), else the
        first matching fallback, else None — or ValueError when required.
        """
        if explicit is not None:
            if explicit not in headers:
                raise ValueError(
                    f"Column '{explicit}' not found in CSV. "
                    f"Available: {', '.join(headers)}"
                )
            return explicit
        for fb in fallbacks:
            if fb in headers:
                return fb
        if required:
            raise ValueError(
                f"Cannot find a '{label}' column in CSV. "
                f"Available: {', '.join(headers)}. "
                f"Pass the correct column name explicitly."
            )
        return None
@@ -0,0 +1,133 @@
1
+ """
2
+ DatabaseAdapter — P1 adapter for PostgreSQL and MySQL via SQLAlchemy.
3
+
4
+ Install: pip install "rost-io[db]"
5
+ For PostgreSQL also: pip install psycopg2-binary (or psycopg)
6
+ For MySQL also: pip install pymysql (or mysqlclient)
7
+
8
+ Usage:
9
+
10
+ adapter = DatabaseAdapter(
11
+ "postgresql+psycopg2://user:pass@host/dbname",
12
+ people_query="SELECT employee_id AS id, full_name AS display_name, roles AS tags FROM staff",
13
+ leave_query='''
14
+ SELECT employee_id AS person_id, leave_start AS start, leave_end AS end,
15
+ leave_type AS type, priority
16
+ FROM employee_leave WHERE approved = TRUE
17
+ ''',
18
+ )
19
+ staff_json = adapter.to_staff_json()
20
+ leave_json = adapter.to_leave_json()
21
+ """
22
+
23
+ from __future__ import annotations
24
+
25
+ from typing import Any
26
+
27
+ from rost_io.base import RostAdapter
28
+
29
+
30
class DatabaseAdapter(RostAdapter):
    """
    Reads from a SQL database (PostgreSQL or MySQL) via SQLAlchemy.

    Requires ``sqlalchemy``: pip install "rost-io[db]"
    Also requires a DB driver:
        PostgreSQL → psycopg2-binary or psycopg
        MySQL      → pymysql or mysqlclient

    Column aliases in your SQL queries must match the canonical field names:
        Staff: ``id``, ``display_name`` (optional), ``tags`` (optional)
        Leave: ``person_id``, ``start``, ``end`` (optional), ``type`` (optional),
               ``priority`` (optional), ``approved`` (optional)

    Args:
        connection_string: SQLAlchemy connection URL.
        people_query: SQL that returns the staff rows.
        leave_query: SQL that returns the leave rows (optional).
        tags_separator: Separator for tags when stored as a delimited string.
            Default ``","`` (comma).
    """

    def __init__(
        self,
        connection_string: str,
        *,
        people_query: str = "SELECT * FROM staff",
        leave_query: str | None = None,
        tags_separator: str = ",",
    ) -> None:
        self.connection_string = connection_string
        self.people_query = people_query
        self.leave_query = leave_query
        self.tags_separator = tags_separator
        # Lazily-created SQLAlchemy engine shared by all queries on this
        # adapter (an engine owns a connection pool — creating one per query
        # defeats pooling).
        self._cached_engine = None

    def _engine(self):
        """Return the (cached) SQLAlchemy engine for this adapter.

        Raises:
            ImportError: with install instructions when sqlalchemy is missing.
        """
        if self._cached_engine is None:
            try:
                from sqlalchemy import create_engine
            except ImportError as exc:
                raise ImportError(
                    "sqlalchemy is required for DatabaseAdapter: pip install 'rost-io[db]'"
                ) from exc
            self._cached_engine = create_engine(self.connection_string)
        return self._cached_engine

    def _query(self, sql: str) -> list[dict[str, Any]]:
        """Execute *sql* and return all rows as column-keyed dicts."""
        from sqlalchemy import text
        engine = self._engine()
        with engine.connect() as conn:
            result = conn.execute(text(sql))
            keys = list(result.keys())
            return [dict(zip(keys, row)) for row in result]

    # ── staff ──────────────────────────────────────────────────────────────────

    def to_staff_json(self) -> dict:
        """Convert the people_query result to canonical rost/staff/v1 JSON."""
        rows = self._query(self.people_query)
        people = []
        for row in rows:
            person_id = str(row.get("id") or row.get("name") or "").strip()
            if not person_id:
                # Rows without an id cannot be referenced — skip them.
                continue
            tags = self._parse_tags(row.get("tags"))
            # `or person_id`, not a .get() default: a SQL NULL display_name
            # would otherwise be stringified to the literal "None".
            display_name = str(row.get("display_name") or person_id).strip()
            people.append({
                "id": person_id,
                "display_name": display_name,
                "tags": tags,
                "custom": {},
            })
        return {"schema": "rost/staff/v1", "people": people}

    # ── leave ──────────────────────────────────────────────────────────────────

    def to_leave_json(self) -> dict:
        """Convert the leave_query result to canonical rost/leave/v1 JSON.

        Raises:
            ValueError: when no ``leave_query`` was configured.
        """
        if not self.leave_query:
            raise ValueError(
                "DatabaseAdapter: provide leave_query to use to_leave_json()"
            )
        rows = self._query(self.leave_query)
        entries = []
        for row in rows:
            person_id = str(row.get("person_id") or row.get("person") or "").strip()
            start = str(row.get("start") or row.get("start_date") or "").strip()
            if not person_id or not start:
                # Both fields are mandatory in rost/leave/v1 — skip bad rows.
                continue
            entry: dict[str, Any] = {"person_id": person_id, "start": start}
            # Optional fields are emitted only when present and truthy.
            if row.get("end") or row.get("end_date"):
                entry["end"] = str(row.get("end") or row.get("end_date")).strip()
            if row.get("type"):
                entry["type"] = str(row["type"]).strip()
            if row.get("priority"):
                entry["priority"] = str(row["priority"]).strip()
            if "approved" in row and row["approved"] is not None:
                entry["approved"] = bool(row["approved"])
            entries.append(entry)
        return {"schema": "rost/leave/v1", "entries": entries}

    # ── helpers ────────────────────────────────────────────────────────────────

    def _parse_tags(self, raw: Any) -> list[str]:
        """Normalise a tags cell (NULL, array column, or delimited string) to a list."""
        if raw is None:
            return []
        if isinstance(raw, list):
            return [str(t) for t in raw if t]
        return [t.strip() for t in str(raw).split(self.tags_separator) if t.strip()]