crossref-local 0.4.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. crossref_local/__init__.py +24 -10
  2. crossref_local/_aio/__init__.py +30 -0
  3. crossref_local/_aio/_impl.py +238 -0
  4. crossref_local/_cache/__init__.py +15 -0
  5. crossref_local/{cache_export.py → _cache/export.py} +27 -10
  6. crossref_local/_cache/utils.py +93 -0
  7. crossref_local/_cli/__init__.py +9 -0
  8. crossref_local/_cli/cli.py +389 -0
  9. crossref_local/_cli/mcp.py +351 -0
  10. crossref_local/_cli/mcp_server.py +457 -0
  11. crossref_local/_cli/search.py +199 -0
  12. crossref_local/_core/__init__.py +62 -0
  13. crossref_local/{api.py → _core/api.py} +26 -5
  14. crossref_local/{citations.py → _core/citations.py} +55 -26
  15. crossref_local/{config.py → _core/config.py} +40 -22
  16. crossref_local/{db.py → _core/db.py} +32 -26
  17. crossref_local/_core/export.py +344 -0
  18. crossref_local/{fts.py → _core/fts.py} +37 -14
  19. crossref_local/{models.py → _core/models.py} +120 -6
  20. crossref_local/_remote/__init__.py +56 -0
  21. crossref_local/_remote/base.py +378 -0
  22. crossref_local/_remote/collections.py +175 -0
  23. crossref_local/_server/__init__.py +140 -0
  24. crossref_local/_server/middleware.py +25 -0
  25. crossref_local/_server/models.py +143 -0
  26. crossref_local/_server/routes_citations.py +98 -0
  27. crossref_local/_server/routes_collections.py +282 -0
  28. crossref_local/_server/routes_compat.py +102 -0
  29. crossref_local/_server/routes_works.py +178 -0
  30. crossref_local/_server/server.py +19 -0
  31. crossref_local/aio.py +30 -206
  32. crossref_local/cache.py +100 -100
  33. crossref_local/cli.py +5 -515
  34. crossref_local/jobs.py +169 -0
  35. crossref_local/mcp_server.py +5 -410
  36. crossref_local/remote.py +5 -266
  37. crossref_local/server.py +5 -349
  38. {crossref_local-0.4.0.dist-info → crossref_local-0.5.1.dist-info}/METADATA +36 -11
  39. crossref_local-0.5.1.dist-info/RECORD +49 -0
  40. {crossref_local-0.4.0.dist-info → crossref_local-0.5.1.dist-info}/entry_points.txt +1 -1
  41. crossref_local/cli_mcp.py +0 -275
  42. crossref_local-0.4.0.dist-info/RECORD +0 -27
  43. /crossref_local/{cache_viz.py → _cache/viz.py} +0 -0
  44. /crossref_local/{cli_cache.py → _cli/cache.py} +0 -0
  45. /crossref_local/{cli_completion.py → _cli/completion.py} +0 -0
  46. /crossref_local/{cli_main.py → _cli/main.py} +0 -0
  47. /crossref_local/{impact_factor → _impact_factor}/__init__.py +0 -0
  48. /crossref_local/{impact_factor → _impact_factor}/calculator.py +0 -0
  49. /crossref_local/{impact_factor → _impact_factor}/journal_lookup.py +0 -0
  50. {crossref_local-0.4.0.dist-info → crossref_local-0.5.1.dist-info}/WHEEL +0 -0
@@ -0,0 +1,457 @@
1
+ """MCP server for CrossRef Local - Claude integration.
2
+
3
+ This server exposes crossref-local functionality as MCP tools,
4
+ enabling Claude Desktop and other MCP clients to search academic papers.
5
+
6
+ Usage:
7
+ crossref-local serve # stdio (Claude Desktop)
8
+ crossref-local serve -t http --port 8082 # HTTP transport
9
+ crossref-local-mcp # Direct entry point
10
+ """
11
+
12
+ import json
13
+
14
+ from fastmcp import FastMCP
15
+
16
+
17
+ from .. import (
18
+ get as _get,
19
+ info as _info,
20
+ search as _search,
21
+ )
22
+
23
# MCP application object. Tool functions below register themselves on this
# instance via the @mcp.tool() decorator; `instructions` is the guidance text
# shown to MCP clients (e.g. Claude Desktop) when they connect.
mcp = FastMCP(
    name="crossref-local",
    instructions=(
        "Local CrossRef database with 167M+ works and full-text search. "
        "Use search to find papers, search_by_doi for DOI lookup, enrich_dois to add "
        "citation counts and references, and status for stats."
    ),
)
30
+
31
+
32
@mcp.tool()
def search(
    query: str,
    limit: int = 10,
    offset: int = 0,
    with_abstracts: bool = False,
    save_path: str | None = None,
    save_format: str = "json",
) -> str:
    """Search for academic works by title, abstract, or authors.

    Uses FTS5 full-text search index for fast searching across 167M+ papers.
    Supports FTS5 query syntax: AND, OR, NOT, "exact phrases".

    Args:
        query: Search query (e.g., "machine learning", "CRISPR", "neural network AND hippocampus")
        limit: Maximum number of results to return (default: 10)
        offset: Skip first N results for pagination (default: 0)
        with_abstracts: Include abstracts in results (default: False)
        save_path: Optional file path to save results (e.g., "results.json", "papers.bib")
        save_format: Output format for save_path: "text", "json", or "bibtex" (default: "json")

    Returns:
        JSON string with search results including total count and matching works.

    Examples:
        search("machine learning")
        search("CRISPR", limit=20)
        search("neural network AND memory", with_abstracts=True)
        search("epilepsy", save_path="epilepsy.bib", save_format="bibtex")
    """
    results = _search(query, limit=limit, offset=offset)

    # Optional side effect: persist the result set before building the response.
    saved_path = None
    if save_path:
        from .._core.export import save as _save

        try:
            saved_path = _save(
                results, save_path, format=save_format, include_abstract=with_abstracts
            )
        except Exception as e:
            return json.dumps({"error": f"Failed to save: {e}"})

    # Compact per-work payload; the abstract key is only present when requested
    # and non-empty, keeping token usage low for MCP clients.
    works_data = [
        {
            "doi": w.doi,
            "title": w.title,
            "authors": w.authors,
            "year": w.year,
            "journal": w.journal,
            **({"abstract": w.abstract} if with_abstracts and w.abstract else {}),
        }
        for w in results.works
    ]

    payload = {
        "query": results.query,
        "total": results.total,
        "returned": len(works_data),
        "elapsed_ms": round(results.elapsed_ms, 2),
        "works": works_data,
    }
    if saved_path:
        payload["saved_to"] = saved_path

    return json.dumps(payload, indent=2)
102
+
103
+
104
@mcp.tool()
def search_by_doi(
    doi: str,
    as_citation: bool = False,
    save_path: str | None = None,
    save_format: str = "json",
) -> str:
    """Get detailed information about a work by DOI.

    Args:
        doi: Digital Object Identifier (e.g., "10.1038/nature12373")
        as_citation: Return formatted citation instead of full metadata
        save_path: Optional file path to save result (e.g., "paper.json", "paper.bib")
        save_format: Output format for save_path: "text", "json", or "bibtex" (default: "json")

    Returns:
        JSON string with work metadata, or formatted citation string.

    Examples:
        search_by_doi("10.1038/nature12373")
        search_by_doi("10.1126/science.aax0758", as_citation=True)
        search_by_doi("10.1038/nature12373", save_path="paper.bib", save_format="bibtex")
    """
    work = _get(doi)
    if work is None:
        return json.dumps({"error": f"DOI not found: {doi}"})

    # Persist first (if asked) so both output branches can report the path.
    saved_path = None
    if save_path:
        from .._core.export import save as _save

        try:
            saved_path = _save(work, save_path, format=save_format)
        except Exception as e:
            return json.dumps({"error": f"Failed to save: {e}"})

    if as_citation:
        # Citation mode returns plain text, not JSON.
        citation_text = work.citation()
        if saved_path:
            citation_text += f"\n\n(Saved to: {saved_path})"
        return citation_text

    payload = work.to_dict()
    if saved_path:
        payload["saved_to"] = saved_path
    return json.dumps(payload, indent=2)
153
+
154
+
155
@mcp.tool()
def status() -> str:
    """Get database statistics and status.

    Returns:
        JSON string with database path, work count, FTS index count, and citation count.
    """
    # _info() already returns a JSON-serializable dict of database stats.
    return json.dumps(_info(), indent=2)
164
+
165
+
166
@mcp.tool()
def enrich_dois(dois: list[str]) -> str:
    """Enrich DOIs with full metadata including citation counts and references.

    Use this after search() to get detailed metadata for papers.
    The search() tool returns basic info (title, authors, year, journal).
    This tool adds: citation_count, references, volume, issue, publisher, etc.

    Typical workflow:
        1. search("epilepsy seizure prediction") -> get DOIs
        2. enrich_dois([doi1, doi2, ...]) -> get full metadata

    Args:
        dois: List of DOIs to enrich (e.g., ["10.1038/nature12373", "10.1126/science.aax0758"])

    Returns:
        JSON string with enriched works including citation_count and references.

    Examples:
        enrich_dois(["10.1038/nature12373"])
        enrich_dois(["10.1038/s41467-017-02577-y", "10.1093/brain/aww019"])
    """
    from .. import get_many as _get_many

    works = _get_many(dois)

    # Defensive: skip any None entries so a missed DOI cannot crash
    # serialization; "found" then honestly reflects the enriched works.
    # (Presumably _get_many already omits misses -- TODO confirm.)
    works_data = [work.to_dict() for work in works if work is not None]

    return json.dumps(
        {
            "requested": len(dois),
            "found": len(works_data),
            "works": works_data,
        },
        indent=2,
    )
204
+
205
+
206
@mcp.tool()
def cache_create(
    name: str,
    query: str,
    limit: int = 1000,
) -> str:
    """Create a paper cache from search query.

    Fetches full metadata for papers matching query and saves to disk cache.
    Use this to build a reusable paper collection for a research topic.

    Args:
        name: Cache name (e.g., "epilepsy", "alzheimers")
        query: FTS search query
        limit: Max papers to cache (default: 1000)

    Returns:
        JSON with cache info (path, paper count, size)

    Example:
        cache_create("epilepsy", "epilepsy seizure prediction", limit=500)
    """
    from .. import cache

    # Renamed from `info` to avoid confusion with the module-level `_info` import.
    cache_info = cache.create(name, query=query, limit=limit)
    return json.dumps(cache_info.to_dict(), indent=2)
232
+
233
+
234
@mcp.tool()
def cache_query(
    name: str,
    fields: list[str] | None = None,
    include_abstract: bool = False,
    include_references: bool = False,
    include_citations: bool = False,
    year_min: int | None = None,
    year_max: int | None = None,
    journal: str | None = None,
    limit: int | None = None,
) -> str:
    """Query cached papers with field filtering.

    Returns minimal data to reduce context usage. Specify only fields needed.

    Args:
        name: Cache name
        fields: Explicit field list (e.g., ["doi", "title", "year"])
        include_abstract: Include abstract (default: False)
        include_references: Include references list (default: False)
        include_citations: Include citation_count (default: False)
        year_min: Filter by minimum year
        year_max: Filter by maximum year
        journal: Filter by journal name (substring match)
        limit: Max results to return

    Returns:
        JSON array of filtered papers

    Examples:
        cache_query("epilepsy", fields=["doi", "title", "year"])
        cache_query("epilepsy", year_min=2020, include_citations=True, limit=50)
    """
    from .. import cache

    # Every filter is forwarded untouched; cache.query owns the filtering logic.
    filters = {
        "fields": fields,
        "include_abstract": include_abstract,
        "include_references": include_references,
        "include_citations": include_citations,
        "year_min": year_min,
        "year_max": year_max,
        "journal": journal,
        "limit": limit,
    }
    papers = cache.query(name, **filters)
    return json.dumps({"count": len(papers), "papers": papers}, indent=2)
282
+
283
+
284
@mcp.tool()
def cache_stats(name: str) -> str:
    """Get cache statistics.

    Returns year distribution, top journals, citation stats without loading full data.

    Args:
        name: Cache name

    Returns:
        JSON with statistics (paper_count, year_range, top_journals, etc.)
    """
    from .. import cache

    return json.dumps(cache.stats(name), indent=2)
300
+
301
+
302
@mcp.tool()
def cache_list() -> str:
    """List all available caches.

    Returns:
        JSON array of cache info (name, path, paper_count, size)
    """
    from .. import cache

    entries = [entry.to_dict() for entry in cache.list_caches()]
    return json.dumps(entries, indent=2)
313
+
314
+
315
@mcp.tool()
def cache_top_cited(
    name: str,
    n: int = 20,
    year_min: int | None = None,
    year_max: int | None = None,
) -> str:
    """Get top cited papers from cache.

    Args:
        name: Cache name
        n: Number of papers to return
        year_min: Filter by minimum year
        year_max: Filter by maximum year

    Returns:
        JSON array of top cited papers
    """
    from .._cache.viz import get_top_cited

    top_papers = get_top_cited(name, n=n, year_min=year_min, year_max=year_max)
    return json.dumps(top_papers, indent=2)
337
+
338
+
339
@mcp.tool()
def cache_citation_summary(name: str) -> str:
    """Get citation statistics for cached papers.

    Returns mean, median, max citations and counts of highly cited papers.

    Args:
        name: Cache name

    Returns:
        JSON with citation statistics
    """
    from .._cache.viz import get_citation_summary

    return json.dumps(get_citation_summary(name), indent=2)
355
+
356
+
357
@mcp.tool()
def cache_plot_scatter(
    name: str,
    output: str,
    top_n: int = 10,
) -> str:
    """Generate year vs citations scatter plot.

    Saves plot to file and returns top cited papers.

    Args:
        name: Cache name
        output: Output file path (png/pdf/svg)
        top_n: Number of top papers to label on plot

    Returns:
        JSON with output path and top papers list
    """
    from .._cache.viz import plot_year_citations

    plot_result = plot_year_citations(name, output=output, top_n=top_n)
    return json.dumps(plot_result, indent=2)
379
+
380
+
381
@mcp.tool()
def cache_plot_network(
    name: str,
    output: str,
    max_nodes: int = 100,
) -> str:
    """Generate citation network visualization.

    Creates interactive HTML graph showing citation relationships.

    Args:
        name: Cache name
        output: Output HTML file path
        max_nodes: Maximum papers to include

    Returns:
        JSON with network stats
    """
    from .._cache.viz import plot_citation_network

    network_stats = plot_citation_network(name, output=output, max_nodes=max_nodes)
    return json.dumps(network_stats, indent=2)
403
+
404
+
405
@mcp.tool()
def cache_export(
    name: str,
    output_path: str,
    format: str = "json",
    fields: list[str] | None = None,
) -> str:
    """Export cache to file.

    Args:
        name: Cache name
        output_path: Output file path
        format: Export format (json, csv, bibtex, dois)
        fields: Fields to include (for json/csv)

    Returns:
        JSON with output path
    """
    from .. import cache

    exported_path = cache.export(name, output_path, format=format, fields=fields)
    return json.dumps({"exported": exported_path, "format": format})
427
+
428
+
429
def run_server(
    transport: str = "stdio",
    host: str = "localhost",
    port: int = 8082,
) -> None:
    """Run the MCP server.

    Args:
        transport: Transport protocol ("stdio", "sse", or "http")
        host: Host for HTTP/SSE transport
        port: Port for HTTP/SSE transport
    """
    # Map the user-facing transport names onto FastMCP run() kwargs; note
    # "http" maps to FastMCP's "streamable-http" transport.
    run_kwargs = {
        "stdio": {"transport": "stdio"},
        "sse": {"transport": "sse", "host": host, "port": port},
        "http": {"transport": "streamable-http", "host": host, "port": port},
    }
    if transport not in run_kwargs:
        raise ValueError(f"Unknown transport: {transport}")
    mcp.run(**run_kwargs[transport])
449
+
450
+
451
def main() -> None:
    """Entry point for crossref-local-mcp command (stdio transport only)."""
    run_server(transport="stdio")


if __name__ == "__main__":
    main()
@@ -0,0 +1,199 @@
1
+ """Search commands for crossref-local CLI."""
2
+
3
+ import json
4
+ import re
5
+ import sys
6
+ from typing import Optional
7
+
8
+ import click
9
+ from rich.console import Console
10
+
11
+ from .. import get, search
12
+ from .._core.export import save as _save
13
+
14
# Shared Rich console for styled terminal output in this module.
console = Console()

# Click context defaults: let both -h and --help show command help.
CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]}
17
+
18
+
19
+ def _strip_xml_tags(text: str) -> str:
20
+ """Strip XML/JATS tags from abstract text."""
21
+ if not text:
22
+ return text
23
+ text = re.sub(r"<[^>]+>", " ", text)
24
+ text = re.sub(r"\s+", " ", text)
25
+ return text.strip()
26
+
27
+
28
+ def _get_if_fast(db, issn: str, cache: dict) -> Optional[float]:
29
+ """Fast IF lookup from OpenAlex data."""
30
+ if issn in cache:
31
+ return cache[issn]
32
+ q = "SELECT two_year_mean_citedness FROM journals_openalex WHERE issns LIKE ?"
33
+ row = db.fetchone(q, (f"%{issn}%",))
34
+ cache[issn] = row["two_year_mean_citedness"] if row else None
35
+ return cache[issn]
36
+
37
+
38
+ @click.command("search", context_settings=CONTEXT_SETTINGS)
39
+ @click.argument("query")
40
+ @click.option(
41
+ "-n", "--number", "limit", default=10, show_default=True, help="Number of results"
42
+ )
43
+ @click.option("-o", "--offset", default=0, help="Skip first N results")
44
+ @click.option("-a", "--abstracts", is_flag=True, help="Show abstracts")
45
+ @click.option("-A", "--authors", is_flag=True, help="Show authors")
46
+ @click.option(
47
+ "-if", "--impact-factor", "with_if", is_flag=True, help="Show journal impact factor"
48
+ )
49
+ @click.option("--json", "as_json", is_flag=True, help="Output as JSON")
50
+ @click.option(
51
+ "--save",
52
+ "save_path",
53
+ type=click.Path(),
54
+ help="Save results to file",
55
+ )
56
+ @click.option(
57
+ "--format",
58
+ "save_format",
59
+ type=click.Choice(["text", "json", "bibtex"]),
60
+ default="json",
61
+ help="Output format for --save (default: json)",
62
+ )
63
+ def search_cmd(
64
+ query: str,
65
+ limit: int,
66
+ offset: int,
67
+ abstracts: bool,
68
+ authors: bool,
69
+ with_if: bool,
70
+ as_json: bool,
71
+ save_path: Optional[str],
72
+ save_format: str,
73
+ ):
74
+ """Search for works by title, abstract, or authors."""
75
+ from .._core.config import Config
76
+ from .._core.db import get_db
77
+
78
+ try:
79
+ results = search(query, limit=limit, offset=offset, with_if=with_if)
80
+ except ConnectionError as e:
81
+ click.secho(f"Error: {e}", fg="red", err=True)
82
+ sys.exit(1)
83
+
84
+ # Local IF lookup only in DB mode (HTTP gets IF from API)
85
+ if_cache, db = {}, None
86
+ if with_if and Config.get_mode() != "http":
87
+ try:
88
+ db = get_db()
89
+ except FileNotFoundError:
90
+ pass
91
+
92
+ # Save to file if requested
93
+ if save_path:
94
+ try:
95
+ saved = _save(
96
+ results, save_path, format=save_format, include_abstract=abstracts
97
+ )
98
+ click.secho(
99
+ f"Saved {len(results)} results to {saved}", fg="green", err=True
100
+ )
101
+ except Exception as e:
102
+ click.secho(f"Error saving: {e}", fg="red", err=True)
103
+ sys.exit(1)
104
+
105
+ if as_json:
106
+ output = {
107
+ "query": results.query,
108
+ "total": results.total,
109
+ "elapsed_ms": results.elapsed_ms,
110
+ "works": [w.to_dict() for w in results.works],
111
+ }
112
+ click.echo(json.dumps(output, indent=2))
113
+ else:
114
+ click.secho(
115
+ f"Found {results.total:,} matches in {results.elapsed_ms:.1f}ms\n",
116
+ fg="green",
117
+ )
118
+ for i, work in enumerate(results.works, start=offset + 1):
119
+ title = _strip_xml_tags(work.title) if work.title else "Untitled"
120
+ year = f"({work.year})" if work.year else ""
121
+ click.secho(f"{i}. {title} {year}", fg="cyan", bold=True)
122
+ click.echo(f" DOI: {work.doi or 'N/A'}")
123
+ if authors and work.authors:
124
+ authors_str = ", ".join(work.authors[:5])
125
+ if len(work.authors) > 5:
126
+ authors_str += f" et al. ({len(work.authors)} total)"
127
+ click.echo(f" Authors: {authors_str}")
128
+ journal_line = f" Journal: {work.journal or 'N/A'}"
129
+ if_val = work.impact_factor or (
130
+ db and work.issn and _get_if_fast(db, work.issn, if_cache)
131
+ )
132
+ if if_val:
133
+ journal_line += f" (IF: {if_val:.2f}, OpenAlex)"
134
+ click.echo(journal_line)
135
+ if abstracts and work.abstract:
136
+ abstract = _strip_xml_tags(work.abstract)[:500]
137
+ click.echo(
138
+ f" Abstract: {abstract}{'...' if len(work.abstract) > 500 else ''}"
139
+ )
140
+ click.echo()
141
+
142
+
143
+ @click.command("search-by-doi", context_settings=CONTEXT_SETTINGS)
144
+ @click.argument("doi")
145
+ @click.option("--json", "as_json", is_flag=True, help="Output as JSON")
146
+ @click.option("--citation", is_flag=True, help="Output as citation")
147
+ @click.option(
148
+ "--save",
149
+ "save_path",
150
+ type=click.Path(),
151
+ help="Save result to file",
152
+ )
153
+ @click.option(
154
+ "--format",
155
+ "save_format",
156
+ type=click.Choice(["text", "json", "bibtex"]),
157
+ default="json",
158
+ help="Output format for --save (default: json)",
159
+ )
160
+ def search_by_doi_cmd(
161
+ doi: str,
162
+ as_json: bool,
163
+ citation: bool,
164
+ save_path: Optional[str],
165
+ save_format: str,
166
+ ):
167
+ """Search for a work by DOI."""
168
+ try:
169
+ work = get(doi)
170
+ except ConnectionError as e:
171
+ click.echo(f"Error: {e}", err=True)
172
+ click.echo("\nRun 'crossref-local status' to check configuration.", err=True)
173
+ sys.exit(1)
174
+
175
+ if work is None:
176
+ click.echo(f"DOI not found: {doi}", err=True)
177
+ sys.exit(1)
178
+
179
+ # Save to file if requested
180
+ if save_path:
181
+ try:
182
+ saved = _save(work, save_path, format=save_format)
183
+ click.secho(f"Saved to {saved}", fg="green", err=True)
184
+ except Exception as e:
185
+ click.secho(f"Error saving: {e}", fg="red", err=True)
186
+ sys.exit(1)
187
+
188
+ if as_json:
189
+ click.echo(json.dumps(work.to_dict(), indent=2))
190
+ elif citation:
191
+ click.echo(work.citation())
192
+ else:
193
+ click.echo(f"Title: {work.title}")
194
+ click.echo(f"Authors: {', '.join(work.authors)}")
195
+ click.echo(f"Year: {work.year}")
196
+ click.echo(f"Journal: {work.journal}")
197
+ click.echo(f"DOI: {work.doi}")
198
+ if work.citation_count:
199
+ click.echo(f"Citations: {work.citation_count}")
@@ -0,0 +1,62 @@
1
#!/usr/bin/env python3
"""Internal core modules for crossref_local.

Re-exports the core API from the submodules so sibling packages can write
``from .._core import search`` instead of reaching into individual modules.
"""

from .api import (
    configure,
    configure_http,
    configure_remote,
    count,
    enrich,
    enrich_dois,
    exists,
    get,
    get_many,
    get_mode,
    info,
    search,
)
from .citations import (
    CitationNetwork,
    get_citation_count,
    get_cited,
    get_citing,
)
from .config import Config
from .db import Database, close_db, get_db
from .export import SUPPORTED_FORMATS, save
from .models import SearchResult, Work

# Public surface of the _core package, grouped by the submodule of origin.
__all__ = [
    # API functions
    "search",
    "count",
    "get",
    "get_many",
    "exists",
    "enrich",
    "enrich_dois",
    "configure",
    "configure_http",
    "configure_remote",
    "get_mode",
    "info",
    # Models
    "Work",
    "SearchResult",
    # Citations
    "get_citing",
    "get_cited",
    "get_citation_count",
    "CitationNetwork",
    # Database
    "Database",
    "get_db",
    "close_db",
    # Config
    "Config",
    # Export
    "save",
    "SUPPORTED_FORMATS",
]

# EOF