skipper-core 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,50 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *.pyo
5
+ *.pyd
6
+ .Python
7
+ *.egg-info/
8
+ dist/
9
+ build/
10
+ *.egg
11
+ .eggs/
12
+
13
+ # Virtual environments
14
+ .venv/
15
+ venv/
16
+ env/
17
+
18
+ # uv
19
+ .uv/
20
+
21
+ # Testing
22
+ .pytest_cache/
23
+ .coverage
24
+ htmlcov/
25
+ .tox/
26
+ .nox/
27
+
28
+ # Type checking
29
+ .mypy_cache/
30
+
31
+ # IDE
32
+ .idea/
33
+ .vscode/
34
+ *.swp
35
+ *.swo
36
+
37
+ # Temp files
38
+ *.tmp
39
+ /tmp/
40
+
41
+ # Credentials (never commit)
42
+ service-account-skipper-bot.json
43
+ *.json.bak
44
+
45
+ # Environment
46
+ .env
47
+ .env.local
48
+
49
+ # macOS
50
+ .DS_Store
@@ -0,0 +1,59 @@
1
+ Metadata-Version: 2.4
2
+ Name: skipper-core
3
+ Version: 0.1.0
4
+ Summary: Core Google Sheets client and resolver for Skipper test-gating
5
+ Project-URL: Homepage, https://github.com/get-skipper/skipper-python
6
+ Project-URL: Repository, https://github.com/get-skipper/skipper-python
7
+ License: MIT
8
+ Keywords: google-sheets,skipper,test-gating,testing
9
+ Classifier: Development Status :: 4 - Beta
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Programming Language :: Python :: 3.13
17
+ Classifier: Topic :: Software Development :: Testing
18
+ Requires-Python: >=3.10
19
+ Requires-Dist: google-api-python-client>=2.100
20
+ Requires-Dist: google-auth>=2.20
21
+ Description-Content-Type: text/markdown
22
+
23
+ # skipper-core
24
+
25
+ Core Google Sheets client and resolver for the Skipper test-gating system.
26
+
27
+ This package contains the shared logic used by all Skipper framework integrations:
28
+ - `SkipperResolver` — initialises from a Google Sheet and answers `is_test_enabled(test_id)`
29
+ - `SheetsClient` — authenticates and fetches spreadsheet data
30
+ - `SheetsWriter` — reconciles the spreadsheet in sync mode
31
+ - `CacheManager` — cross-process cache sharing for parallel test runners
32
+ - `build_test_id` / `normalize_test_id` — canonical test ID helpers
33
+ - Credential types: `FileCredentials`, `Base64Credentials`, `ServiceAccountCredentials`
34
+
35
+ See the [root README](../../README.md) for full documentation.
36
+
37
+ ## Installation
38
+
39
+ ```bash
40
+ pip install skipper-core
41
+ ```
42
+
43
+ ## Usage
44
+
45
+ ```python
46
+ from skipper_core import SkipperConfig, SkipperResolver, FileCredentials
47
+
48
+ config = SkipperConfig(
49
+ spreadsheet_id="YOUR_SPREADSHEET_ID",
50
+ credentials=FileCredentials("./service-account.json"),
51
+ sheet_name="skipper-python",
52
+ )
53
+
54
+ resolver = SkipperResolver(config)
55
+ resolver.initialize()
56
+
57
+ if not resolver.is_test_enabled("tests/test_auth.py > test_login"):
58
+ pytest.skip("[skipper] Test disabled")
59
+ ```
@@ -0,0 +1,37 @@
1
+ # skipper-core
2
+
3
+ Core Google Sheets client and resolver for the Skipper test-gating system.
4
+
5
+ This package contains the shared logic used by all Skipper framework integrations:
6
+ - `SkipperResolver` — initialises from a Google Sheet and answers `is_test_enabled(test_id)`
7
+ - `SheetsClient` — authenticates and fetches spreadsheet data
8
+ - `SheetsWriter` — reconciles the spreadsheet in sync mode
9
+ - `CacheManager` — cross-process cache sharing for parallel test runners
10
+ - `build_test_id` / `normalize_test_id` — canonical test ID helpers
11
+ - Credential types: `FileCredentials`, `Base64Credentials`, `ServiceAccountCredentials`
12
+
13
+ See the [root README](../../README.md) for full documentation.
14
+
15
+ ## Installation
16
+
17
+ ```bash
18
+ pip install skipper-core
19
+ ```
20
+
21
+ ## Usage
22
+
23
+ ```python
24
+ from skipper_core import SkipperConfig, SkipperResolver, FileCredentials
25
+
26
+ config = SkipperConfig(
27
+ spreadsheet_id="YOUR_SPREADSHEET_ID",
28
+ credentials=FileCredentials("./service-account.json"),
29
+ sheet_name="skipper-python",
30
+ )
31
+
32
+ resolver = SkipperResolver(config)
33
+ resolver.initialize()
34
+
35
+ if not resolver.is_test_enabled("tests/test_auth.py > test_login"):
36
+ pytest.skip("[skipper] Test disabled")
37
+ ```
@@ -0,0 +1,34 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "skipper-core"
7
+ version = "0.1.0"
8
+ description = "Core Google Sheets client and resolver for Skipper test-gating"
9
+ readme = "README.md"
10
+ license = { text = "MIT" }
11
+ requires-python = ">=3.10"
12
+ keywords = ["testing", "test-gating", "google-sheets", "skipper"]
13
+ classifiers = [
14
+ "Development Status :: 4 - Beta",
15
+ "Intended Audience :: Developers",
16
+ "License :: OSI Approved :: MIT License",
17
+ "Programming Language :: Python :: 3",
18
+ "Programming Language :: Python :: 3.10",
19
+ "Programming Language :: Python :: 3.11",
20
+ "Programming Language :: Python :: 3.12",
21
+ "Programming Language :: Python :: 3.13",
22
+ "Topic :: Software Development :: Testing",
23
+ ]
24
+ dependencies = [
25
+ "google-api-python-client>=2.100",
26
+ "google-auth>=2.20",
27
+ ]
28
+
29
+ [project.urls]
30
+ Homepage = "https://github.com/get-skipper/skipper-python"
31
+ Repository = "https://github.com/get-skipper/skipper-python"
32
+
33
+ [tool.hatch.build.targets.wheel]
34
+ packages = ["src/skipper_core"]
@@ -0,0 +1,35 @@
1
+ """skipper-core — Google Sheets client and resolver for Skipper test-gating."""
2
+
3
+ from .cache import CacheManager
4
+ from .client import FetchAllResult, SheetFetchResult, SheetsClient, TestEntry
5
+ from .config import SkipperConfig
6
+ from .credentials import Base64Credentials, Credentials, FileCredentials, ServiceAccountCredentials
7
+ from .logger import log, logf, warn
8
+ from .mode import SkipperMode, mode_from_env
9
+ from .resolver import SkipperResolver
10
+ from .testid import build_test_id, normalize_test_id
11
+ from .writer import SheetsWriter
12
+
13
+ __all__ = [
14
+ "CacheManager",
15
+ "FetchAllResult",
16
+ "SheetFetchResult",
17
+ "SheetsClient",
18
+ "SheetsWriter",
19
+ "SkipperConfig",
20
+ "SkipperMode",
21
+ "SkipperResolver",
22
+ "TestEntry",
23
+ # Credentials
24
+ "Base64Credentials",
25
+ "Credentials",
26
+ "FileCredentials",
27
+ "ServiceAccountCredentials",
28
+ # Helpers
29
+ "build_test_id",
30
+ "log",
31
+ "logf",
32
+ "mode_from_env",
33
+ "normalize_test_id",
34
+ "warn",
35
+ ]
@@ -0,0 +1,63 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ import secrets
6
+ import tempfile
7
+ import time
8
+ from pathlib import Path
9
+
10
+ from .logger import logf, warn
11
+
12
+ _CACHE_FILE_NAME = "cache.json"
13
+
14
+
15
class CacheManager:
    """Owns the temporary directory used to share resolver state between processes."""

    def write_resolver_cache(self, data: bytes) -> str:
        """Write cache bytes to a temp directory. Returns the directory path.

        Set SKIPPER_CACHE_FILE to <dir>/cache.json so worker processes can
        rehydrate the resolver without re-fetching from Google Sheets.
        """
        directory = tempfile.mkdtemp(prefix="skipper-")
        cache_path = os.path.join(directory, _CACHE_FILE_NAME)
        Path(cache_path).write_bytes(data)
        logf("wrote resolver cache to %s", cache_path)
        return directory

    def read_resolver_cache(self, cache_file: str) -> bytes:
        """Return the serialized resolver bytes stored at cache_file."""
        return Path(cache_file).read_bytes()

    def write_discovered_ids(self, directory: str, ids: list[str]) -> None:
        """Persist discovered test IDs to a uniquely-named JSON file in directory.

        The pid + nanosecond timestamp + random suffix makes the name unique
        across concurrent writer processes.
        """
        unique_name = f"{os.getpid()}-{time.time_ns()}-{secrets.token_hex(4)}.json"
        target = Path(directory) / unique_name
        target.write_text(json.dumps(ids), encoding="utf-8")

    def merge_discovered_ids(self, directory: str) -> list[str]:
        """Combine every per-process ID file in directory, preserving first-seen order."""
        known: set[str] = set()
        merged: list[str] = []

        for candidate in sorted(Path(directory).iterdir()):
            # Skip the resolver cache itself, directories, and non-JSON files.
            if (
                candidate.is_dir()
                or candidate.name == _CACHE_FILE_NAME
                or candidate.suffix != ".json"
            ):
                continue
            # Best-effort: an unreadable file is logged and skipped, not fatal.
            try:
                loaded: list[str] = json.loads(candidate.read_text(encoding="utf-8"))
                for test_id in loaded:
                    if test_id not in known:
                        known.add(test_id)
                        merged.append(test_id)
            except Exception as exc:
                warn(f"cannot read discovered file {candidate.name!r}: {exc}")

        return merged

    def cleanup(self, directory: str) -> None:
        """Delete the temp directory and everything under it, ignoring errors."""
        import shutil

        shutil.rmtree(directory, ignore_errors=True)
@@ -0,0 +1,192 @@
1
+ from __future__ import annotations
2
+
3
+ import dataclasses
4
+ from datetime import datetime, timezone
5
+ from typing import Any
6
+
7
+ from google.oauth2 import service_account
8
+ from googleapiclient.discovery import build # type: ignore[import-untyped]
9
+
10
+ from .config import SkipperConfig
11
+ from .logger import logf, warn
12
+ from .testid import normalize_test_id
13
+
14
+ _SHEETS_SCOPE = "https://www.googleapis.com/auth/spreadsheets"
15
+
16
+
17
@dataclasses.dataclass
class TestEntry:
    """One spreadsheet row: a test ID plus its optional disable-until date."""

    # Raw test identifier exactly as it appears in the sheet (not yet normalized).
    test_id: str
    disabled_until: datetime | None  # None = no date → test is enabled
    notes: str = ""  # contents of the "notes" column, empty when the column/cell is absent
22
+
23
+
24
@dataclasses.dataclass
class SheetFetchResult:
    """Parsed contents of a single sheet (tab) within the spreadsheet."""

    sheet_name: str  # tab title the values were fetched from
    sheet_id: int  # numeric sheetId (used by row-deletion batchUpdate requests)
    header: list[str]  # first row, verbatim column names ([] for an empty sheet)
    entries: list[TestEntry]  # parsed data rows; rows with a blank test ID are skipped
30
+
31
+
32
@dataclasses.dataclass
class FetchAllResult:
    """Result of SheetsClient.fetch_all: primary sheet plus the merged entry set."""

    primary: SheetFetchResult
    entries: list[TestEntry]  # merged from primary + reference sheets
    service: Any  # googleapiclient Resource — reused by SheetsWriter
37
+
38
+
39
class SheetsClient:
    """Authenticates against the Google Sheets API and fetches test entries."""

    def __init__(self, config: SkipperConfig) -> None:
        self._config = config

    def fetch_all(self) -> FetchAllResult:
        """Fetch the primary sheet and every configured reference sheet.

        Returns a FetchAllResult whose ``entries`` merges all sheets (the
        most restrictive disabledUntil wins, see _merge_entries) and whose
        ``service`` is the authenticated API resource for reuse by
        SheetsWriter.
        """
        cred_json = self._config.credentials.resolve()
        import json as _json

        info = _json.loads(cred_json)
        # Service-account credentials scoped to the Sheets API only.
        creds = service_account.Credentials.from_service_account_info(info, scopes=[_SHEETS_SCOPE])  # type: ignore[no-untyped-call]
        svc = build("sheets", "v4", credentials=creds, cache_discovery=False)

        # Spreadsheet metadata: provides tab titles and numeric sheet IDs.
        spreadsheet = svc.spreadsheets().get(spreadsheetId=self._config.spreadsheet_id).execute()

        primary_name = self._config.sheet_name
        if not primary_name:
            # No sheet configured — fall back to the first tab of the spreadsheet.
            sheets = spreadsheet.get("sheets", [])
            primary_name = sheets[0]["properties"]["title"] if sheets else ""

        primary = self._fetch_sheet(svc, primary_name, spreadsheet)

        merged = _merge_entries([], primary.entries)

        # Reference sheets are best-effort: a broken one is logged, not fatal.
        for ref_name in self._config.reference_sheets:
            try:
                ref = self._fetch_sheet(svc, ref_name, spreadsheet)
                merged = _merge_entries(merged, ref.entries)
            except Exception as exc:
                warn(f"cannot fetch reference sheet {ref_name!r}: {exc}")

        return FetchAllResult(primary=primary, entries=merged, service=svc)

    def _fetch_sheet(
        self,
        svc: Any,
        sheet_name: str,
        spreadsheet: dict[str, Any],
    ) -> SheetFetchResult:
        """Fetch one sheet's values and parse its rows into TestEntry objects.

        Raises ValueError when the configured test-ID column is missing from
        a non-empty sheet's header.
        """
        resp = (
            svc.spreadsheets()
            .values()
            .get(spreadsheetId=self._config.spreadsheet_id, range=sheet_name)
            .execute()
        )
        values: list[list[Any]] = resp.get("values", [])

        if not values:
            # Completely empty sheet: no header to validate, no entries.
            return SheetFetchResult(
                sheet_name=sheet_name,
                sheet_id=_sheet_id_by_name(spreadsheet, sheet_name),
                header=[],
                entries=[],
            )

        # First row is the header; locate the columns we care about.
        header = [str(v) for v in values[0]]
        test_id_idx = _index_of(header, self._config.test_id_column)
        disabled_until_idx = _index_of(header, self._config.disabled_until_column)
        notes_idx = _index_of(header, "notes")

        if test_id_idx < 0:
            raise ValueError(
                f"column {self._config.test_id_column!r} not found in sheet {sheet_name!r}"
            )

        entries: list[TestEntry] = []
        for row in values[1:]:
            # The API omits trailing empty cells, so rows can be short.
            if test_id_idx >= len(row):
                continue
            test_id = str(row[test_id_idx]).strip()
            if not test_id:
                continue

            disabled_until: datetime | None = None
            if disabled_until_idx >= 0 and disabled_until_idx < len(row):
                raw = str(row[disabled_until_idx]).strip()
                if raw:
                    parsed = _parse_date(raw)
                    if parsed is not None:
                        disabled_until = parsed
                    else:
                        # Unparseable date: warn and treat the test as enabled.
                        warn(f"cannot parse disabledUntil {raw!r} for test {test_id!r}")

            notes = ""
            if notes_idx >= 0 and notes_idx < len(row):
                notes = str(row[notes_idx])

            entries.append(TestEntry(test_id=test_id, disabled_until=disabled_until, notes=notes))

        logf("fetched %d entries from sheet %r", len(entries), sheet_name)
        return SheetFetchResult(
            sheet_name=sheet_name,
            sheet_id=_sheet_id_by_name(spreadsheet, sheet_name),
            header=header,
            entries=entries,
        )
134
+
135
+
136
def _merge_entries(existing: list[TestEntry], incoming: list[TestEntry]) -> list[TestEntry]:
    """Merge incoming entries, keeping the most restrictive (latest) disabledUntil.

    Matching is by normalized test ID; new IDs are appended in order.
    """
    merged = list(existing)
    position: dict[str, int] = {
        normalize_test_id(entry.test_id): i for i, entry in enumerate(merged)
    }

    for entry in incoming:
        key = normalize_test_id(entry.test_id)
        slot = position.get(key)
        if slot is None:
            # First time we see this ID — append as-is.
            position[key] = len(merged)
            merged.append(entry)
            continue
        # Already present: keep whichever disabledUntil is more restrictive.
        if _more_restrictive(entry.disabled_until, merged[slot].disabled_until):
            merged[slot] = dataclasses.replace(merged[slot], disabled_until=entry.disabled_until)

    return merged
156
+
157
+
158
def _more_restrictive(candidate: datetime | None, current: datetime | None) -> bool:
    """True when candidate extends the disabled window beyond current.

    A missing candidate never wins; a missing current always loses.
    """
    if candidate is None:
        return False
    return current is None or candidate > current
164
+
165
+
166
def _parse_date(s: str) -> datetime | None:
    """Parse a disabledUntil cell into a timezone-aware UTC datetime.

    Accepted formats:
      - "YYYY-MM-DD"           → end of that day (23:59:59 UTC), so the whole
                                  day stays disabled
      - "YYYY-MM-DDTHH:MM:SSZ" → that instant, UTC
      - "YYYY-MM-DDTHH:MM:SS"  → that instant, assumed UTC

    Returns None for anything unparseable.

    Every successful parse carries tzinfo=UTC. Previously only the date-only
    format was made aware, so a sheet mixing formats produced naive and aware
    datetimes side by side, and comparing them (_more_restrictive's ``>``)
    raises TypeError. Stamping all results as UTC keeps them comparable and
    matches the resolver, which already treats naive values as UTC.
    """
    formats = ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%dT%H:%M:%S"]
    for fmt in formats:
        try:
            dt = datetime.strptime(s, fmt)
        except ValueError:
            continue
        if fmt == "%Y-%m-%d":
            # Treat as end-of-day UTC so the full day is disabled.
            dt = dt.replace(hour=23, minute=59, second=59)
        # strptime yields naive datetimes; normalize everything to aware UTC.
        return dt.replace(tzinfo=timezone.utc)
    return None
179
+
180
+
181
def _index_of(header: list[str], col: str) -> int:
    """Index of the first occurrence of col in header, or -1 when absent."""
    for position, name in enumerate(header):
        if name == col:
            return position
    return -1
186
+
187
+
188
def _sheet_id_by_name(spreadsheet: dict[str, Any], name: str) -> int:
    """Numeric sheetId of the tab titled ``name``; falls back to 0 when not found."""
    matches = (
        sheet["properties"]["sheetId"]
        for sheet in spreadsheet.get("sheets", [])
        if sheet["properties"]["title"] == name
    )
    return int(next(matches, 0))
@@ -0,0 +1,18 @@
1
+ from __future__ import annotations
2
+
3
+ import dataclasses
4
+ from typing import TYPE_CHECKING
5
+
6
+ if TYPE_CHECKING:
7
+ from .credentials import Credentials
8
+
9
+
10
@dataclasses.dataclass(frozen=True)
class SkipperConfig:
    """Immutable configuration shared by SheetsClient, SheetsWriter and SkipperResolver."""

    # ID of the target Google Spreadsheet.
    spreadsheet_id: str
    # Credential source; resolve() must yield service-account JSON bytes.
    credentials: Credentials
    # Tab to read/write; None → the spreadsheet's first tab is used.
    sheet_name: str | None = None
    # Extra tabs whose entries are merged in read-only.
    reference_sheets: tuple[str, ...] = ()
    # Column header names in the spreadsheet (camelCase matches all other ports).
    test_id_column: str = "testId"
    disabled_until_column: str = "disabledUntil"
@@ -0,0 +1,45 @@
1
+ from __future__ import annotations
2
+
3
+ import base64
4
+ import dataclasses
5
+ import json
6
+ from pathlib import Path
7
+ from typing import Protocol, runtime_checkable
8
+
9
+
10
@runtime_checkable
class Credentials(Protocol):
    """Structural interface: any object whose resolve() returns service-account JSON bytes."""

    def resolve(self) -> bytes: ...
13
+
14
+
15
@dataclasses.dataclass(frozen=True)
class FileCredentials:
    """Credentials loaded from a service-account JSON file on disk."""

    path: str

    def resolve(self) -> bytes:
        """Read and return the raw bytes of the credential file."""
        with open(self.path, "rb") as handle:
            return handle.read()
21
+
22
+
23
@dataclasses.dataclass(frozen=True)
class Base64Credentials:
    """Credentials supplied as a base64-encoded string."""

    encoded: str

    def resolve(self) -> bytes:
        """Decode the base64 payload back into raw JSON bytes."""
        payload = self.encoded
        return base64.b64decode(payload)
29
+
30
+
31
@dataclasses.dataclass(frozen=True)
class ServiceAccountCredentials:
    """Service-account credentials supplied field-by-field instead of as a file.

    Field names mirror the keys of a Google service-account JSON document.
    """

    type: str
    project_id: str
    private_key_id: str
    private_key: str
    client_email: str
    client_id: str
    auth_uri: str
    token_uri: str
    auth_provider_x509_cert_url: str
    client_x509_cert_url: str

    def resolve(self) -> bytes:
        """Serialize the fields back into JSON bytes."""
        as_mapping = dataclasses.asdict(self)
        return json.dumps(as_mapping).encode()
@@ -0,0 +1,30 @@
1
+ import logging
2
+ import os
3
+
4
# Dedicated "skipper" logger with its own handler and "[skipper]" prefix;
# propagate=False keeps messages from being duplicated by root-logger handlers.
_logger = logging.getLogger("skipper")
_handler = logging.StreamHandler()
_handler.setFormatter(logging.Formatter("[skipper] %(message)s"))
_logger.addHandler(_handler)
_logger.propagate = False
9
+
10
+
11
def _is_debug_enabled() -> bool:
    """True when SKIPPER_DEBUG is set to any non-empty value."""
    value = os.getenv("SKIPPER_DEBUG")
    return value is not None and value != ""
13
+
14
+
15
def log(msg: str) -> None:
    """Emit a debug message when SKIPPER_DEBUG is set; otherwise do nothing."""
    if not _is_debug_enabled():
        return
    _logger.setLevel(logging.DEBUG)
    _logger.debug(msg)
19
+
20
+
21
def logf(fmt: str, *args: object) -> None:
    """Emit a %-style formatted debug message when SKIPPER_DEBUG is set."""
    if not _is_debug_enabled():
        return
    _logger.setLevel(logging.DEBUG)
    # Lazy %-formatting: args are only interpolated when the record is emitted.
    _logger.debug(fmt, *args)
25
+
26
+
27
def warn(msg: str) -> None:
    """Emit a warning prefixed with "WARN:".

    NOTE(review): like log/logf this is gated on SKIPPER_DEBUG, so warnings
    are silent unless debugging is enabled — confirm this matches the other
    Skipper ports' behaviour.
    """
    if not _is_debug_enabled():
        return
    _logger.setLevel(logging.DEBUG)
    _logger.warning("WARN: %s", msg)
@@ -0,0 +1,13 @@
1
+ import os
2
+ from enum import Enum
3
+
4
+
5
class SkipperMode(str, Enum):
    """Operating mode: READ_ONLY only reads the sheet; SYNC also reconciles it."""

    READ_ONLY = "read-only"
    SYNC = "sync"
8
+
9
+
10
def mode_from_env() -> SkipperMode:
    """Read SKIPPER_MODE from the environment; anything but "sync" means read-only."""
    return SkipperMode.SYNC if os.getenv("SKIPPER_MODE") == "sync" else SkipperMode.READ_ONLY
@@ -0,0 +1,84 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from datetime import datetime, timezone
5
+ from typing import Any
6
+
7
+ from .client import SheetsClient
8
+ from .config import SkipperConfig
9
+ from .logger import log, logf
10
+ from .testid import normalize_test_id
11
+
12
+
13
class SkipperResolver:
    """Fetches a Google Spreadsheet and determines whether tests should run.

    Usage::

        resolver = SkipperResolver(config)
        resolver.initialize()  # fetches spreadsheet once
        if not resolver.is_test_enabled("tests/test_auth.py > test_login"):
            pytest.skip("disabled")
    """

    def __init__(self, config: SkipperConfig) -> None:
        self._config = config
        self._client = SheetsClient(config)
        # Normalized test ID → disabledUntil (None means "in sheet, no date")
        self._cache: dict[str, datetime | None] = {}

    def initialize(self) -> None:
        """Fetch the spreadsheet and populate the internal cache. Call once before use."""
        log("initializing resolver")
        fetched = self._client.fetch_all()
        self._cache.update(
            (normalize_test_id(entry.test_id), entry.disabled_until)
            for entry in fetched.entries
        )
        logf("loaded %d test entries from spreadsheet", len(self._cache))

    def is_test_enabled(self, test_id: str) -> bool:
        """Return True if the test should run.

        - Tests not in the spreadsheet always run (opt-out model).
        - Tests with no date, or a past/today date, run normally.
        - Tests with a future disabledUntil date are skipped.
        """
        key = normalize_test_id(test_id)
        try:
            until = self._cache[key]
        except KeyError:
            # Unknown to the sheet — always enabled.
            return True
        if until is None:
            return True
        # Make naive datetimes comparable by treating them as UTC.
        if until.tzinfo is None:
            until = until.replace(tzinfo=timezone.utc)
        return until <= datetime.now(tz=timezone.utc)

    def get_disabled_until(self, test_id: str) -> datetime | None:
        """Return the disabledUntil date for a test, or None if not set."""
        return self._cache.get(normalize_test_id(test_id))

    def marshal_cache(self) -> bytes:
        """Serialize the cache to JSON bytes for cross-process sharing."""
        serializable = {
            key: (value.isoformat() if value is not None else None)
            for key, value in self._cache.items()
        }
        return json.dumps(serializable).encode()

    @classmethod
    def from_marshal_cache(cls, data: bytes) -> SkipperResolver:
        """Rehydrate a resolver from bytes produced by marshal_cache.

        The rehydrated instance has no config/client — it can only answer
        cache lookups, not re-fetch the spreadsheet.
        """
        raw: dict[str, Any] = json.loads(data)
        restored: dict[str, datetime | None] = {
            key: (datetime.fromisoformat(value) if value is not None else None)
            for key, value in raw.items()
        }
        resolver = cls.__new__(cls)
        resolver._cache = restored
        resolver._config = None  # type: ignore[assignment]
        resolver._client = None  # type: ignore[assignment]
        return resolver
@@ -0,0 +1,57 @@
1
+ from __future__ import annotations
2
+
3
+ import contextlib
4
+ import re
5
+ from pathlib import Path
6
+
7
+ _WHITESPACE_RE = re.compile(r"\s+")
8
+
9
+
10
def normalize_test_id(test_id: str) -> str:
    """Lowercase, strip, and collapse whitespace for case-insensitive matching."""
    collapsed = re.sub(r"\s+", " ", test_id.strip())
    return collapsed.lower()
13
+
14
+
15
def build_test_id(file_path: str, title_parts: list[str]) -> str:
    """Build a test ID in the format: 'path/to/test.py > part1 > part2'.

    If file_path is absolute it is made relative to the project root
    (located by searching for pyproject.toml or setup.py).
    Path separators are normalised to forward slashes.
    """
    parts = [_to_relative_path(file_path)]
    parts.extend(title_parts)
    return " > ".join(parts)
24
+
25
+
26
def _to_relative_path(file_path: str) -> str:
    """Make an absolute path relative to the project root; use forward slashes."""
    candidate = Path(file_path)
    if candidate.is_absolute():
        # Fall back to the current working directory when no root marker is found.
        root = _find_project_root(candidate) or Path.cwd()
        # A path outside the root raises ValueError — keep it absolute then.
        with contextlib.suppress(ValueError):
            candidate = candidate.relative_to(root)
    return str(candidate).replace("\\", "/")
37
+
38
+
39
def _find_project_root(file_path: Path) -> Path | None:
    """Walk up looking for pyproject.toml (workspace root) or setup.py (module root)."""
    ancestors = [file_path.parent, *file_path.parent.parents]

    # Prefer pyproject.toml (uv workspace) over setup.py (legacy).
    outermost_pyproject: Path | None = None
    for directory in ancestors:
        if (directory / "pyproject.toml").exists():
            # Keep walking up — we want the *outermost* pyproject.toml
            # (i.e., the workspace root, not a sub-package root).
            outermost_pyproject = directory
        if (directory / "uv.lock").exists():
            # uv workspace root — highest priority, stop immediately.
            return directory
    if outermost_pyproject is not None:
        return outermost_pyproject

    # Fallback: the nearest ancestor containing setup.py.
    return next((d for d in ancestors if (d / "setup.py").exists()), None)
@@ -0,0 +1,108 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+
5
+ from .client import SheetsClient, _index_of
6
+ from .config import SkipperConfig
7
+ from .logger import logf
8
+ from .testid import normalize_test_id
9
+
10
+
11
class SheetsWriter:
    """Reconciles a Google Spreadsheet with a set of discovered test IDs.

    Sync behaviour:
      - Rows whose test ID is no longer discovered (and belong to the same files
        as the current sync) are deleted.
      - Newly discovered test IDs are appended with an empty disabledUntil.
    """

    def __init__(self, config: SkipperConfig) -> None:
        self._config = config
        self._client = SheetsClient(config)

    def sync(self, discovered_ids: list[str]) -> None:
        """Reconcile the primary sheet against discovered_ids.

        Deletes stale rows owned by this sync's files, then appends rows for
        IDs not yet present. Raises ValueError when the configured test-ID
        column is missing from the sheet header.
        """
        logf("syncing %d discovered test IDs", len(discovered_ids))

        result = self._client.fetch_all()
        primary = result.primary
        # Reuse the authenticated API resource from the fetch.
        svc: Any = result.service

        test_id_idx = _index_of(primary.header, self._config.test_id_column)
        if test_id_idx < 0:
            raise ValueError(
                f"column {self._config.test_id_column!r} not found in sheet {primary.sheet_name!r}"
            )

        discovered_set = {normalize_test_id(id_) for id_ in discovered_ids}

        # Determine which file paths are "owned" by this sync.
        # owned_files holds the "file" part of each ID (text before " > ");
        # owned_bases holds just the basename, so sheet rows recorded without
        # a directory prefix can still be matched.
        owned_files: set[str] = set()
        owned_bases: set[str] = set()
        for id_ in discovered_ids:
            nid = normalize_test_id(id_)
            sep = nid.find(" > ")
            if sep >= 0:
                file = nid[:sep]
                owned_files.add(file)
                base_sep = file.rfind("/")
                owned_bases.add(file[base_sep + 1 :] if base_sep >= 0 else file)

        existing_map = {normalize_test_id(e.test_id): e.test_id for e in primary.entries}

        # Collect row indices (0-based, after header) to delete.
        rows_to_delete: list[int] = []
        for i, entry in enumerate(primary.entries):
            nid = normalize_test_id(entry.test_id)
            sep = nid.find(" > ")
            if sep < 0:
                # Malformed row — clean it up unconditionally.
                # (i + 1 converts the entry index to a sheet row index,
                # accounting for the header row.)
                rows_to_delete.append(i + 1)
                continue
            file = nid[:sep]
            owned = file in owned_files
            if not owned and "/" not in file:
                # Bare-filename row: match against basenames of owned files.
                owned = file in owned_bases
            if not owned:
                # Row belongs to a file this sync did not see — leave it alone.
                continue
            if nid not in discovered_set:
                rows_to_delete.append(i + 1)

        # Delete in descending order to avoid index shifting.
        if rows_to_delete:
            rows_to_delete.sort(reverse=True)
            requests = [
                {
                    "deleteDimension": {
                        "range": {
                            "sheetId": primary.sheet_id,
                            "dimension": "ROWS",
                            "startIndex": row_idx,
                            "endIndex": row_idx + 1,
                        }
                    }
                }
                for row_idx in rows_to_delete
            ]
            svc.spreadsheets().batchUpdate(
                spreadsheetId=self._config.spreadsheet_id,
                body={"requests": requests},
            ).execute()
            logf("deleted %d rows from spreadsheet", len(rows_to_delete))

        # Append new rows: one per discovered ID not already in the sheet,
        # padded to the header width so only the test-ID cell is filled.
        to_add = [id_ for id_ in discovered_ids if normalize_test_id(id_) not in existing_map]
        if to_add:
            header_len = len(primary.header)
            values: list[list[Any]] = []
            for id_ in to_add:
                row: list[Any] = [""] * header_len
                row[test_id_idx] = id_
                values.append(row)
            svc.spreadsheets().values().append(
                spreadsheetId=self._config.spreadsheet_id,
                range=primary.sheet_name,
                valueInputOption="RAW",
                body={"values": values},
            ).execute()
            logf("appended %d new test IDs to spreadsheet", len(to_add))