crossref-local 0.3.1-py3-none-any.whl → 0.5.0-py3-none-any.whl

Files changed (48)
  1. crossref_local/__init__.py +38 -16
  2. crossref_local/__main__.py +0 -0
  3. crossref_local/_aio/__init__.py +30 -0
  4. crossref_local/_aio/_impl.py +238 -0
  5. crossref_local/_cache/__init__.py +15 -0
  6. crossref_local/_cache/export.py +100 -0
  7. crossref_local/_cache/utils.py +93 -0
  8. crossref_local/_cache/viz.py +296 -0
  9. crossref_local/_cli/__init__.py +9 -0
  10. crossref_local/_cli/cache.py +179 -0
  11. crossref_local/_cli/cli.py +512 -0
  12. crossref_local/_cli/completion.py +245 -0
  13. crossref_local/_cli/main.py +20 -0
  14. crossref_local/_cli/mcp.py +351 -0
  15. crossref_local/_cli/mcp_server.py +413 -0
  16. crossref_local/_core/__init__.py +58 -0
  17. crossref_local/{api.py → _core/api.py} +130 -36
  18. crossref_local/{citations.py → _core/citations.py} +55 -26
  19. crossref_local/{config.py → _core/config.py} +57 -42
  20. crossref_local/{db.py → _core/db.py} +32 -26
  21. crossref_local/{fts.py → _core/fts.py} +18 -14
  22. crossref_local/{models.py → _core/models.py} +11 -6
  23. crossref_local/{impact_factor → _impact_factor}/__init__.py +0 -0
  24. crossref_local/{impact_factor → _impact_factor}/calculator.py +0 -0
  25. crossref_local/{impact_factor → _impact_factor}/journal_lookup.py +0 -0
  26. crossref_local/_remote/__init__.py +56 -0
  27. crossref_local/_remote/base.py +356 -0
  28. crossref_local/_remote/collections.py +175 -0
  29. crossref_local/_server/__init__.py +140 -0
  30. crossref_local/_server/middleware.py +25 -0
  31. crossref_local/_server/models.py +129 -0
  32. crossref_local/_server/routes_citations.py +98 -0
  33. crossref_local/_server/routes_collections.py +282 -0
  34. crossref_local/_server/routes_compat.py +102 -0
  35. crossref_local/_server/routes_works.py +128 -0
  36. crossref_local/_server/server.py +19 -0
  37. crossref_local/aio.py +30 -206
  38. crossref_local/cache.py +466 -0
  39. crossref_local/cli.py +5 -447
  40. crossref_local/jobs.py +169 -0
  41. crossref_local/mcp_server.py +5 -199
  42. crossref_local/remote.py +5 -261
  43. crossref_local/server.py +5 -349
  44. {crossref_local-0.3.1.dist-info → crossref_local-0.5.0.dist-info}/METADATA +88 -24
  45. crossref_local-0.5.0.dist-info/RECORD +47 -0
  46. crossref_local-0.3.1.dist-info/RECORD +0 -20
  47. {crossref_local-0.3.1.dist-info → crossref_local-0.5.0.dist-info}/WHEEL +0 -0
  48. {crossref_local-0.3.1.dist-info → crossref_local-0.5.0.dist-info}/entry_points.txt +0 -0
crossref_local/aio.py CHANGED
@@ -1,5 +1,5 @@
-"""
-Async API for crossref_local.
+#!/usr/bin/env python3
+"""Async API module for crossref_local.
 
 Provides async versions of all API functions. Uses thread pool execution
 with per-thread database connections for thread safety.
@@ -19,210 +19,30 @@ Usage:
     counts = await aio.count_many(["CRISPR", "machine learning"])
 """
 
-import asyncio
-import threading
-from typing import List, Optional
-
-from .models import Work, SearchResult
-from .config import Config
-from .db import Database
-
-
-# Thread-local storage for database connections
-_thread_local = threading.local()
-
-
-def _get_thread_db() -> Database:
-    """Get thread-local database connection."""
-    if not hasattr(_thread_local, 'db'):
-        _thread_local.db = Database(Config.get_db_path())
-    return _thread_local.db
-
-
-def _search_sync(query: str, limit: int, offset: int) -> SearchResult:
-    """Thread-safe sync search."""
-    from . import fts
-    # Use thread-local DB
-    db = _get_thread_db()
-    return fts._search_with_db(db, query, limit, offset)
-
-
-def _count_sync(query: str) -> int:
-    """Thread-safe sync count."""
-    from . import fts
-    db = _get_thread_db()
-    return fts._count_with_db(db, query)
-
-
-def _get_sync(doi: str) -> Optional[Work]:
-    """Thread-safe sync get."""
-    db = _get_thread_db()
-    metadata = db.get_metadata(doi)
-    if metadata:
-        return Work.from_metadata(doi, metadata)
-    return None
-
-
-def _get_many_sync(dois: List[str]) -> List[Work]:
-    """Thread-safe sync get_many."""
-    db = _get_thread_db()
-    works = []
-    for doi in dois:
-        metadata = db.get_metadata(doi)
-        if metadata:
-            works.append(Work.from_metadata(doi, metadata))
-    return works
-
-
-def _exists_sync(doi: str) -> bool:
-    """Thread-safe sync exists."""
-    db = _get_thread_db()
-    row = db.fetchone("SELECT 1 FROM works WHERE doi = ?", (doi,))
-    return row is not None
-
-
-def _info_sync() -> dict:
-    """Thread-safe sync info."""
-    db = _get_thread_db()
-
-    row = db.fetchone("SELECT COUNT(*) as count FROM works")
-    work_count = row["count"] if row else 0
-
-    try:
-        row = db.fetchone("SELECT COUNT(*) as count FROM works_fts")
-        fts_count = row["count"] if row else 0
-    except Exception:
-        fts_count = 0
-
-    try:
-        row = db.fetchone("SELECT COUNT(*) as count FROM citations")
-        citation_count = row["count"] if row else 0
-    except Exception:
-        citation_count = 0
-
-    return {
-        "db_path": str(Config.get_db_path()),
-        "works": work_count,
-        "fts_indexed": fts_count,
-        "citations": citation_count,
-    }
-
-
-async def search(
-    query: str,
-    limit: int = 10,
-    offset: int = 0,
-) -> SearchResult:
-    """
-    Async full-text search across works.
-
-    Args:
-        query: Search query (supports FTS5 syntax)
-        limit: Maximum results to return
-        offset: Skip first N results (for pagination)
-
-    Returns:
-        SearchResult with matching works
-    """
-    return await asyncio.to_thread(_search_sync, query, limit, offset)
-
-
-async def count(query: str) -> int:
-    """
-    Async count matching works without fetching results.
-
-    Args:
-        query: FTS5 search query
-
-    Returns:
-        Number of matching works
-    """
-    return await asyncio.to_thread(_count_sync, query)
-
-
-async def get(doi: str) -> Optional[Work]:
-    """
-    Async get a work by DOI.
-
-    Args:
-        doi: Digital Object Identifier
-
-    Returns:
-        Work object or None if not found
-    """
-    return await asyncio.to_thread(_get_sync, doi)
-
-
-async def get_many(dois: List[str]) -> List[Work]:
-    """
-    Async get multiple works by DOI.
-
-    Args:
-        dois: List of DOIs
-
-    Returns:
-        List of Work objects (missing DOIs are skipped)
-    """
-    return await asyncio.to_thread(_get_many_sync, dois)
-
-
-async def exists(doi: str) -> bool:
-    """
-    Async check if a DOI exists in the database.
-
-    Args:
-        doi: Digital Object Identifier
-
-    Returns:
-        True if DOI exists
-    """
-    return await asyncio.to_thread(_exists_sync, doi)
-
-
-async def info() -> dict:
-    """
-    Async get database information.
-
-    Returns:
-        Dictionary with database stats
-    """
-    return await asyncio.to_thread(_info_sync)
-
-
-async def search_many(queries: List[str], limit: int = 10) -> List[SearchResult]:
-    """
-    Run multiple searches concurrently.
-
-    Args:
-        queries: List of search queries
-        limit: Maximum results per query
-
-    Returns:
-        List of SearchResult objects
-    """
-    tasks = [search(q, limit=limit) for q in queries]
-    return await asyncio.gather(*tasks)
-
-
-async def count_many(queries: List[str]) -> dict:
-    """
-    Count matches for multiple queries concurrently.
-
-    Args:
-        queries: List of search queries
-
-    Returns:
-        Dict mapping query -> count
-
-    Example:
-        >>> counts = await count_many(["CRISPR", "machine learning"])
-        >>> print(counts)
-        {'CRISPR': 45000, 'machine learning': 477922}
-    """
-    tasks = [count(q) for q in queries]
-    results = await asyncio.gather(*tasks)
-    return dict(zip(queries, results))
-
+from ._aio import (
+    SearchResult as _SearchResult,
+    Work as _Work,
+    count as _count,
+    count_many as _count_many,
+    exists as _exists,
+    get as _get,
+    get_many as _get_many,
+    info as _info,
+    search as _search,
+    search_many as _search_many,
+)
+
+# Re-export with clean names
+search = _search
+count = _count
+get = _get
+get_many = _get_many
+exists = _exists
+info = _info
+search_many = _search_many
+count_many = _count_many
+SearchResult = _SearchResult
+Work = _Work
 
 __all__ = [
     "search",
@@ -233,4 +53,8 @@ __all__ = [
     "info",
     "search_many",
     "count_many",
+    "SearchResult",
+    "Work",
 ]
+
+# EOF
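
In 0.5.0, aio.py keeps its public API but re-exports the implementation from the new crossref_local._aio package. Going by the module docstring kept above, existing async call sites should continue to work unchanged; a minimal usage sketch (the import path and example queries follow the docstring and are not re-verified against the released wheel):

    >>> import asyncio
    >>> from crossref_local import aio
    >>> async def main():
    ...     hits = await aio.search("CRISPR", limit=5)        # async FTS search
    ...     counts = await aio.count_many(["CRISPR", "machine learning"])
    ...     return hits, counts
    >>> asyncio.run(main())
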
crossref_local/cache.py ADDED
@@ -0,0 +1,466 @@
+"""Cache module for crossref-local.
+
+Provides disk-based caching of paper metadata to reduce context usage
+and enable efficient re-querying with field filtering.
+
+Architecture:
+    1. FTS search -> DOIs (fast, minimal)
+    2. Cache DOIs -> full metadata saved to disk
+    3. Query cache -> filtered fields based on need
+
+Usage:
+    >>> from crossref_local import cache
+    >>> # Create cache from search
+    >>> cache.create("epilepsy", query="epilepsy seizure prediction", limit=100)
+    >>> # Query with minimal fields
+    >>> papers = cache.query("epilepsy", fields=["doi", "title", "year"])
+    >>> # Get statistics
+    >>> stats = cache.stats("epilepsy")
+"""
+
+import json as _json
+import time as _time
+from dataclasses import dataclass as _dataclass
+from typing import Any as _Any
+from typing import Dict as _Dict
+from typing import List as _List
+from typing import Optional as _Optional
+
+from ._core.api import get_many as _get_many
+from ._core.api import search as _search
+from ._cache.utils import cache_path as _cache_path
+from ._cache.utils import get_cache_dir as _get_cache_dir
+from ._cache.utils import meta_path as _meta_path
+
+__all__ = [
+    "CacheInfo",
+    "create",
+    "append",
+    "load",
+    "query",
+    "query_dois",
+    "stats",
+    "info",
+    "exists",
+    "list_caches",
+    "delete",
+    "export",
+]
+
+
+@_dataclass
+class CacheInfo:
+    """Information about a cache."""
+
+    name: str
+    path: str
+    size_bytes: int
+    paper_count: int
+    created_at: str
+    query: _Optional[str] = None
+
+    def to_dict(self) -> dict:
+        return {
+            "name": self.name,
+            "path": self.path,
+            "size_bytes": self.size_bytes,
+            "size_mb": round(self.size_bytes / 1024 / 1024, 2),
+            "paper_count": self.paper_count,
+            "created_at": self.created_at,
+            "query": self.query,
+        }
+
+
+def create(
+    name: str,
+    query: _Optional[str] = None,
+    dois: _Optional[_List[str]] = None,
+    papers: _Optional[_List[_Dict[str, _Any]]] = None,
+    limit: int = 1000,
+    offset: int = 0,
+    user_id: _Optional[str] = None,
+) -> CacheInfo:
+    """Create a cache from search query, DOI list, or pre-fetched papers.
+
+    Args:
+        name: Cache name (used as filename)
+        query: FTS search query (if dois/papers not provided)
+        dois: Explicit list of DOIs to cache
+        papers: Pre-fetched paper dicts (skips API calls)
+        limit: Max papers to fetch (for query mode)
+        offset: Offset for pagination (for query mode)
+        user_id: _Optional user ID for multi-tenant scoping
+
+    Returns:
+        CacheInfo with cache details
+
+    Example:
+        >>> create("epilepsy", query="epilepsy seizure", limit=500)
+        >>> create("my_papers", dois=["10.1038/nature12373", ...])
+        >>> create("imported", papers=[{"doi": "...", "title": "..."}])
+    """
+    if papers is not None:
+        # Use pre-fetched papers directly
+        pass
+    elif dois is None and query is None:
+        raise ValueError("Must provide 'query', 'dois', or 'papers'")
+    elif dois is None:
+        # Get DOIs from search
+        results = _search(query, limit=limit, offset=offset)
+        dois = [w.doi for w in results.works]
+        # Fetch full metadata
+        works = _get_many(dois)
+        papers = [w.to_dict() for w in works]
+    else:
+        # Fetch full metadata for DOIs
+        works = _get_many(dois)
+        papers = [w.to_dict() for w in works]
+
+    # Save cache
+    cache_file = _cache_path(name, user_id)
+    with open(cache_file, "w") as f:
+        _json.dump(papers, f)
+
+    # Save metadata
+    meta = {
+        "name": name,
+        "query": query,
+        "created_at": _time.strftime("%Y-%m-%d %H:%M:%S"),
+        "paper_count": len(papers),
+        "dois_requested": len(dois) if dois else len(papers),
+    }
+    with open(_meta_path(name, user_id), "w") as f:
+        _json.dump(meta, f, indent=2)
+
+    return CacheInfo(
+        name=name,
+        path=str(cache_file),
+        size_bytes=cache_file.stat().st_size,
+        paper_count=len(papers),
+        created_at=meta["created_at"],
+        query=query,
+    )
+
+
+def append(
+    name: str,
+    query: _Optional[str] = None,
+    dois: _Optional[_List[str]] = None,
+    limit: int = 1000,
+    offset: int = 0,
+    user_id: _Optional[str] = None,
+) -> CacheInfo:
+    """Append papers to existing cache.
+
+    Args:
+        name: Existing cache name
+        query: FTS search query (if dois not provided)
+        dois: Explicit list of DOIs to add
+        limit: Max papers to fetch (for query mode)
+        offset: Offset for pagination (for query mode)
+        user_id: _Optional user ID for multi-tenant scoping
+
+    Returns:
+        Updated CacheInfo
+    """
+    if not exists(name, user_id=user_id):
+        return create(
+            name, query=query, dois=dois, limit=limit, offset=offset, user_id=user_id
+        )
+
+    # Load existing
+    existing = load(name, user_id=user_id)
+    existing_dois = {p["doi"] for p in existing}
+
+    # Get new DOIs
+    if dois is None and query is not None:
+        results = _search(query, limit=limit, offset=offset)
+        dois = [w.doi for w in results.works]
+    elif dois is None:
+        raise ValueError("Must provide either 'query' or 'dois'")
+
+    # Filter out already cached
+    new_dois = [d for d in dois if d not in existing_dois]
+
+    if new_dois:
+        # Fetch new metadata
+        new_works = _get_many(new_dois)
+        new_papers = [w.to_dict() for w in new_works]
+
+        # Combine and save
+        all_papers = existing + new_papers
+        cache_file = _cache_path(name, user_id)
+        with open(cache_file, "w") as f:
+            _json.dump(all_papers, f)
+
+        # Update metadata
+        meta_file = _meta_path(name, user_id)
+        if meta_file.exists():
+            with open(meta_file) as f:
+                meta = _json.load(f)
+        else:
+            meta = {"name": name}
+
+        meta["updated_at"] = _time.strftime("%Y-%m-%d %H:%M:%S")
+        meta["paper_count"] = len(all_papers)
+
+        with open(meta_file, "w") as f:
+            _json.dump(meta, f, indent=2)
+
+        return info(name, user_id=user_id)
+
+    return info(name, user_id=user_id)
+
+
+def load(name: str, user_id: _Optional[str] = None) -> _List[_Dict[str, _Any]]:
+    """Load raw cache data.
+
+    Args:
+        name: Cache name
+        user_id: _Optional user ID for multi-tenant scoping
+
+    Returns:
+        List of paper dictionaries with full metadata
+    """
+    cache_file = _cache_path(name, user_id)
+    if not cache_file.exists():
+        raise FileNotFoundError(f"Cache not found: {name}")
+
+    with open(cache_file) as f:
+        return _json.load(f)
+
+
+def query(
+    name: str,
+    fields: _Optional[_List[str]] = None,
+    include_abstract: bool = False,
+    include_references: bool = False,
+    include_citations: bool = False,
+    year_min: _Optional[int] = None,
+    year_max: _Optional[int] = None,
+    journal: _Optional[str] = None,
+    limit: _Optional[int] = None,
+    user_id: _Optional[str] = None,
+) -> _List[_Dict[str, _Any]]:
+    """Query cache with field filtering.
+
+    Args:
+        name: Cache name
+        fields: Explicit field list (overrides include_* flags)
+        include_abstract: Include abstract field
+        include_references: Include references list
+        include_citations: Include citation_count
+        year_min: Filter by minimum year
+        year_max: Filter by maximum year
+        journal: Filter by journal name (substring match)
+        limit: Max results to return
+        user_id: _Optional user ID for multi-tenant scoping
+
+    Returns:
+        Filtered list of paper dictionaries
+
+    Example:
+        >>> # Minimal query
+        >>> papers = query("epilepsy", fields=["doi", "title", "year"])
+        >>> # With filters
+        >>> papers = query("epilepsy", year_min=2020, include_citations=True)
+    """
+    papers = load(name, user_id=user_id)
+
+    # Apply filters
+    if year_min is not None:
+        papers = [p for p in papers if p.get("year") and p["year"] >= year_min]
+    if year_max is not None:
+        papers = [p for p in papers if p.get("year") and p["year"] <= year_max]
+    if journal is not None:
+        journal_lower = journal.lower()
+        papers = [
+            p
+            for p in papers
+            if p.get("journal") and journal_lower in p["journal"].lower()
+        ]
+
+    # Apply limit
+    if limit is not None:
+        papers = papers[:limit]
+
+    # Field projection
+    if fields is not None:
+        # Explicit field list
+        papers = [{k: p.get(k) for k in fields if k in p} for p in papers]
+    else:
+        # Build field list from flags
+        base_fields = {"doi", "title", "authors", "year", "journal"}
+        if include_abstract:
+            base_fields.add("abstract")
+        if include_references:
+            base_fields.add("references")
+        if include_citations:
+            base_fields.add("citation_count")
+
+        papers = [{k: p.get(k) for k in base_fields if k in p} for p in papers]
+
+    return papers
+
+
+def query_dois(name: str, user_id: _Optional[str] = None) -> _List[str]:
+    """Get just DOIs from cache.
+
+    Args:
+        name: Cache name
+        user_id: _Optional user ID for multi-tenant scoping
+
+    Returns:
+        List of DOIs
+    """
+    papers = load(name, user_id=user_id)
+    return [p["doi"] for p in papers if p.get("doi")]
+
+
+def stats(name: str, user_id: _Optional[str] = None) -> _Dict[str, _Any]:
+    """Get cache statistics.
+
+    Args:
+        name: Cache name
+        user_id: _Optional user ID for multi-tenant scoping
+
+    Returns:
+        Dictionary with statistics
+    """
+    papers = load(name, user_id=user_id)
+
+    # Year distribution
+    years = [p.get("year") for p in papers if p.get("year")]
+    year_dist = {}
+    for y in years:
+        year_dist[y] = year_dist.get(y, 0) + 1
+
+    # Journal distribution
+    journals = [p.get("journal") for p in papers if p.get("journal")]
+    journal_dist = {}
+    for j in journals:
+        journal_dist[j] = journal_dist.get(j, 0) + 1
+    top_journals = sorted(journal_dist.items(), key=lambda x: -x[1])[:20]
+
+    # Abstract coverage
+    with_abstract = sum(1 for p in papers if p.get("abstract"))
+
+    # Citation stats
+    citations = [p.get("citation_count", 0) for p in papers if p.get("citation_count")]
+
+    return {
+        "paper_count": len(papers),
+        "year_range": {
+            "min": min(years) if years else None,
+            "max": max(years) if years else None,
+        },
+        "year_distribution": dict(sorted(year_dist.items())),
+        "with_abstract": with_abstract,
+        "abstract_coverage": round(with_abstract / len(papers) * 100, 1)
+        if papers
+        else 0,
+        "top_journals": [{"journal": j, "count": c} for j, c in top_journals],
+        "citation_stats": {
+            "total": sum(citations),
+            "mean": round(sum(citations) / len(citations), 1) if citations else 0,
+            "max": max(citations) if citations else 0,
+        }
+        if citations
+        else None,
+    }
+
+
+def info(name: str, user_id: _Optional[str] = None) -> CacheInfo:
+    """Get cache information.
+
+    Args:
+        name: Cache name
+        user_id: _Optional user ID for multi-tenant scoping
+
+    Returns:
+        CacheInfo object
+    """
+    cache_file = _cache_path(name, user_id)
+    if not cache_file.exists():
+        raise FileNotFoundError(f"Cache not found: {name}")
+
+    meta_file = _meta_path(name, user_id)
+    meta = {}
+    if meta_file.exists():
+        with open(meta_file) as f:
+            meta = _json.load(f)
+
+    papers = load(name, user_id=user_id)
+
+    return CacheInfo(
+        name=name,
+        path=str(cache_file),
+        size_bytes=cache_file.stat().st_size,
+        paper_count=len(papers),
+        created_at=meta.get("created_at", "unknown"),
+        query=meta.get("query"),
+    )
+
+
+def exists(name: str, user_id: _Optional[str] = None) -> bool:
+    """Check if cache exists.
+
+    Args:
+        name: Cache name
+        user_id: _Optional user ID for multi-tenant scoping
+
+    Returns:
+        True if cache exists
+    """
+    return _cache_path(name, user_id).exists()
+
+
+def list_caches(user_id: _Optional[str] = None) -> _List[CacheInfo]:
+    """List all available caches.
+
+    Args:
+        user_id: _Optional user ID for multi-tenant scoping
+
+    Returns:
+        List of CacheInfo objects
+    """
+    cache_dir = _get_cache_dir(user_id)
+    caches = []
+
+    for f in cache_dir.glob("*.json"):
+        if f.name.endswith(".meta.json"):
+            continue
+        name = f.stem
+        try:
+            caches.append(info(name, user_id=user_id))
+        except Exception:
+            pass
+
+    return sorted(caches, key=lambda c: c.name)
+
+
+def delete(name: str, user_id: _Optional[str] = None) -> bool:
+    """Delete a cache.
+
+    Args:
+        name: Cache name
+        user_id: _Optional user ID for multi-tenant scoping
+
+    Returns:
+        True if deleted
+    """
+    cache_file = _cache_path(name, user_id)
+    meta_file = _meta_path(name, user_id)
+
+    deleted = False
+    if cache_file.exists():
+        cache_file.unlink()
+        deleted = True
+    if meta_file.exists():
+        meta_file.unlink()
+
+    return deleted
+
+
+# Re-export from cache_export for backwards compatibility
+from ._cache.export import export
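
The cache.py module docstring above describes the intended flow: run an FTS search to collect DOIs, persist the full metadata to disk once, then re-query the cache with only the fields you need. A short sketch assembled from the docstring examples and the signatures shown above (the cache name, query string, and field list are illustrative):

    >>> from crossref_local import cache
    >>> cache.create("epilepsy", query="epilepsy seizure prediction", limit=100)
    >>> recent = cache.query("epilepsy", fields=["doi", "title", "year"], year_min=2020)
    >>> summary = cache.stats("epilepsy")    # year/journal distributions, abstract coverage
    >>> cache.delete("epilepsy")             # removes both the cache file and its .meta.json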