academic-refchecker 1.2.54__py3-none-any.whl → 1.2.56__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. {academic_refchecker-1.2.54.dist-info → academic_refchecker-1.2.56.dist-info}/METADATA +23 -23
  2. academic_refchecker-1.2.56.dist-info/RECORD +49 -0
  3. academic_refchecker-1.2.56.dist-info/entry_points.txt +2 -0
  4. academic_refchecker-1.2.56.dist-info/top_level.txt +1 -0
  5. refchecker/__init__.py +13 -0
  6. refchecker/__main__.py +11 -0
  7. refchecker/__version__.py +5 -0
  8. {checkers → refchecker/checkers}/crossref.py +5 -5
  9. {checkers → refchecker/checkers}/enhanced_hybrid_checker.py +1 -1
  10. {checkers → refchecker/checkers}/github_checker.py +4 -4
  11. {checkers → refchecker/checkers}/local_semantic_scholar.py +10 -11
  12. {checkers → refchecker/checkers}/openalex.py +6 -6
  13. {checkers → refchecker/checkers}/openreview_checker.py +8 -8
  14. {checkers → refchecker/checkers}/pdf_paper_checker.py +1 -1
  15. {checkers → refchecker/checkers}/semantic_scholar.py +47 -33
  16. {checkers → refchecker/checkers}/webpage_checker.py +3 -3
  17. {core → refchecker/core}/parallel_processor.py +6 -6
  18. {core → refchecker/core}/refchecker.py +72 -76
  19. {llm → refchecker/llm}/providers.py +17 -1
  20. {services → refchecker/services}/pdf_processor.py +22 -2
  21. {utils → refchecker/utils}/arxiv_utils.py +3 -3
  22. {utils → refchecker/utils}/biblatex_parser.py +4 -4
  23. {utils → refchecker/utils}/bibliography_utils.py +5 -5
  24. {utils → refchecker/utils}/bibtex_parser.py +5 -5
  25. {utils → refchecker/utils}/error_utils.py +9 -9
  26. {utils → refchecker/utils}/text_utils.py +10 -10
  27. {utils → refchecker/utils}/url_utils.py +8 -5
  28. __version__.py +0 -3
  29. academic_refchecker-1.2.54.dist-info/RECORD +0 -47
  30. academic_refchecker-1.2.54.dist-info/entry_points.txt +0 -2
  31. academic_refchecker-1.2.54.dist-info/top_level.txt +0 -9
  32. {academic_refchecker-1.2.54.dist-info → academic_refchecker-1.2.56.dist-info}/WHEEL +0 -0
  33. {academic_refchecker-1.2.54.dist-info → academic_refchecker-1.2.56.dist-info}/licenses/LICENSE +0 -0
  34. {checkers → refchecker/checkers}/__init__.py +0 -0
  35. {config → refchecker/config}/__init__.py +0 -0
  36. {config → refchecker/config}/logging.conf +0 -0
  37. {config → refchecker/config}/settings.py +0 -0
  38. {core → refchecker/core}/__init__.py +0 -0
  39. {core → refchecker/core}/db_connection_pool.py +0 -0
  40. {database → refchecker/database}/__init__.py +0 -0
  41. {database → refchecker/database}/download_semantic_scholar_db.py +0 -0
  42. {llm → refchecker/llm}/__init__.py +0 -0
  43. {llm → refchecker/llm}/base.py +0 -0
  44. {scripts → refchecker/scripts}/__init__.py +0 -0
  45. {scripts → refchecker/scripts}/start_vllm_server.py +0 -0
  46. {services → refchecker/services}/__init__.py +0 -0
  47. {utils → refchecker/utils}/__init__.py +0 -0
  48. {utils → refchecker/utils}/author_utils.py +0 -0
  49. {utils → refchecker/utils}/config_validator.py +0 -0
  50. {utils → refchecker/utils}/db_utils.py +0 -0
  51. {utils → refchecker/utils}/doi_utils.py +0 -0
  52. {utils → refchecker/utils}/mock_objects.py +0 -0
  53. {utils → refchecker/utils}/unicode_utils.py +0 -0
@@ -32,7 +32,7 @@ def extract_arxiv_id_from_paper(paper):
32
32
 
33
33
  if hasattr(paper, 'pdf_url') and paper.pdf_url:
34
34
  # Try to extract ArXiv ID from the PDF URL
35
- from utils.url_utils import extract_arxiv_id_from_url
35
+ from refchecker.utils.url_utils import extract_arxiv_id_from_url
36
36
  arxiv_id = extract_arxiv_id_from_url(paper.pdf_url)
37
37
  elif hasattr(paper, 'get_short_id'):
38
38
  # Check if the paper ID itself is an ArXiv ID
@@ -316,7 +316,7 @@ def filter_bibtex_by_citations(bib_content, tex_files, main_tex_content):
316
316
  return bib_content
317
317
 
318
318
  # Parse BibTeX entries and filter
319
- from utils.bibtex_parser import parse_bibtex_entries
319
+ from refchecker.utils.bibtex_parser import parse_bibtex_entries
320
320
  entries = parse_bibtex_entries(bib_content)
321
321
 
322
322
  # Filter entries to only cited ones and remove duplicates
@@ -481,7 +481,7 @@ def get_bibtex_content(paper):
481
481
 
482
482
  elif tex_content:
483
483
  # Check for embedded bibliography in LaTeX
484
- from utils.text_utils import detect_latex_bibliography_format
484
+ from refchecker.utils.text_utils import detect_latex_bibliography_format
485
485
  latex_format = detect_latex_bibliography_format(tex_content)
486
486
  if latex_format['is_latex'] and ('\\bibitem' in tex_content or '@' in tex_content):
487
487
  logger.info(f"Found embedded bibliography in ArXiv LaTeX source, but skipping due to formatting incompatibility")
@@ -200,8 +200,8 @@ def parse_biblatex_references(text: str) -> List[Dict[str, Any]]:
200
200
  List of structured reference dictionaries, or empty list if
201
201
  parsing quality is poor (to trigger LLM fallback)
202
202
  """
203
- from utils.text_utils import parse_authors_with_initials, clean_title
204
- from utils.doi_utils import construct_doi_url, is_valid_doi_format
203
+ from refchecker.utils.text_utils import parse_authors_with_initials, clean_title
204
+ from refchecker.utils.doi_utils import construct_doi_url, is_valid_doi_format
205
205
 
206
206
  if not text or not detect_biblatex_format(text):
207
207
  return []
@@ -300,8 +300,8 @@ def parse_biblatex_entry_content(entry_num: str, content: str) -> Dict[str, Any]
300
300
  Returns:
301
301
  Dictionary with parsed entry data
302
302
  """
303
- from utils.text_utils import parse_authors_with_initials, clean_title
304
- from utils.doi_utils import construct_doi_url, is_valid_doi_format
303
+ from refchecker.utils.text_utils import parse_authors_with_initials, clean_title
304
+ from refchecker.utils.doi_utils import construct_doi_url, is_valid_doi_format
305
305
 
306
306
  # Initialize default values
307
307
  title = ""
@@ -164,7 +164,7 @@ def _parse_bibtex_references(bibliography_text):
164
164
  Returns:
165
165
  List of reference dictionaries
166
166
  """
167
- from utils.bibtex_parser import parse_bibtex_entries
167
+ from refchecker.utils.bibtex_parser import parse_bibtex_entries
168
168
  return parse_bibtex_entries(bibliography_text)
169
169
 
170
170
 
@@ -178,7 +178,7 @@ def _parse_biblatex_references(bibliography_text):
178
178
  Returns:
179
179
  List of reference dictionaries
180
180
  """
181
- from utils.text_utils import extract_latex_references
181
+ from refchecker.utils.text_utils import extract_latex_references
182
182
  return extract_latex_references(bibliography_text)
183
183
 
184
184
 
@@ -186,7 +186,7 @@ def _parse_standard_acm_natbib_references(bibliography_text):
186
186
  """
187
187
  Parse references using regex for standard ACM/natbib format (both ACM Reference Format and simple natbib)
188
188
  """
189
- from utils.text_utils import detect_standard_acm_natbib_format
189
+ from refchecker.utils.text_utils import detect_standard_acm_natbib_format
190
190
 
191
191
  references = []
192
192
 
@@ -230,7 +230,7 @@ def _parse_simple_natbib_format(ref_num, content, label):
230
230
  Returns:
231
231
  Dictionary containing parsed reference information
232
232
  """
233
- from utils.text_utils import extract_url_from_reference, extract_year_from_reference
233
+ from refchecker.utils.text_utils import extract_url_from_reference, extract_year_from_reference
234
234
 
235
235
  # Basic parsing - this could be enhanced with more sophisticated NLP
236
236
  reference = {
@@ -288,7 +288,7 @@ def _parse_references_regex(bibliography_text):
288
288
  }
289
289
 
290
290
  # Basic information extraction
291
- from utils.text_utils import extract_url_from_reference, extract_year_from_reference
291
+ from refchecker.utils.text_utils import extract_url_from_reference, extract_year_from_reference
292
292
 
293
293
  url = extract_url_from_reference(ref_content)
294
294
  if url:
@@ -214,8 +214,8 @@ def parse_bibtex_references(bibliography_text: str) -> List[Dict[str, Any]]:
214
214
  Returns:
215
215
  List of structured reference dictionaries
216
216
  """
217
- from utils.text_utils import parse_authors_with_initials, clean_title
218
- from utils.doi_utils import construct_doi_url, is_valid_doi_format
217
+ from refchecker.utils.text_utils import parse_authors_with_initials, clean_title
218
+ from refchecker.utils.doi_utils import construct_doi_url, is_valid_doi_format
219
219
 
220
220
  entries = parse_bibtex_entries(bibliography_text)
221
221
  references = []
@@ -291,7 +291,7 @@ def parse_bibtex_references(bibliography_text: str) -> List[Dict[str, Any]]:
291
291
  # Extract other URLs
292
292
  url = fields.get('url', '')
293
293
  if url:
294
- from utils.url_utils import clean_url
294
+ from refchecker.utils.url_utils import clean_url
295
295
  url = clean_url(url)
296
296
 
297
297
  # Handle special @misc entries with only howpublished field
@@ -318,7 +318,7 @@ def parse_bibtex_references(bibliography_text: str) -> List[Dict[str, Any]]:
318
318
  url = howpublished
319
319
 
320
320
  # Clean the reconstructed URL
321
- from utils.url_utils import clean_url
321
+ from refchecker.utils.url_utils import clean_url
322
322
  url = clean_url(url)
323
323
 
324
324
  # Generate title from domain/path
@@ -350,7 +350,7 @@ def parse_bibtex_references(bibliography_text: str) -> List[Dict[str, Any]]:
350
350
 
351
351
  # Clean any URL we extracted
352
352
  if url:
353
- from utils.url_utils import clean_url
353
+ from refchecker.utils.url_utils import clean_url
354
354
  url = clean_url(url)
355
355
 
356
356
  # Construct ArXiv URL from eprint field if no URL present
@@ -42,8 +42,8 @@ def format_three_line_mismatch(mismatch_type: str, left: str, right: str) -> str
42
42
 
43
43
  Example:
44
44
  Title mismatch:
45
- 'Cited Title'
46
- vs: 'Correct Title'
45
+ cited: 'Cited Title'
46
+ actual: 'Correct Title'
47
47
 
48
48
  Args:
49
49
  mismatch_type: The type of mismatch (e.g., "Author 2 mismatch", "Title mismatch")
@@ -57,11 +57,10 @@ def format_three_line_mismatch(mismatch_type: str, left: str, right: str) -> str
57
57
  if not mismatch_type.endswith(":"):
58
58
  mismatch_type = mismatch_type.rstrip() + ":"
59
59
 
60
- # Use fixed indentation for clean, consistent alignment
61
- indent = "" # spaces for content indentation
62
- vs_indent = "" # vs: starts at column 0 for clear visual separation
60
+ # Use fixed indentation for labels, keeping detail column aligned
61
+ label_indent = " " # 7 spaces to indent labels
63
62
 
64
- return f"{mismatch_type}\n{indent}cited: '{left}'\n{vs_indent}actual: '{right}'"
63
+ return f"{mismatch_type}\n{label_indent}cited: {left}\n{label_indent}actual: {right}"
65
64
 
66
65
 
67
66
  def format_title_mismatch(cited_title: str, verified_title: str) -> str:
@@ -179,7 +178,7 @@ def clean_venue_for_comparison(venue: str) -> str:
179
178
  Returns:
180
179
  Cleaned venue name suitable for display
181
180
  """
182
- from utils.text_utils import normalize_venue_for_display
181
+ from refchecker.utils.text_utils import normalize_venue_for_display
183
182
  return normalize_venue_for_display(venue)
184
183
 
185
184
 
@@ -187,8 +186,9 @@ def format_missing_venue(correct_venue: str) -> str:
187
186
  """
188
187
  Format a missing venue message with only the actual value.
189
188
  """
190
- # Only show the actual venue; omit the empty cited line
191
- return f"Missing venue: '{correct_venue}'"
189
+ # Only show the actual venue with indented label
190
+ label_indent = " " # 7 spaces to indent labels
191
+ return f"Missing venue:\n{label_indent}actual: {correct_venue}"
192
192
 
193
193
 
194
194
  def create_venue_warning(cited_venue: str, correct_venue: str) -> Dict[str, str]:
@@ -689,7 +689,7 @@ def extract_arxiv_id_from_url(url):
689
689
  Returns:
690
690
  ArXiv ID or None if not found
691
691
  """
692
- from utils.url_utils import extract_arxiv_id_from_url as common_extract
692
+ from refchecker.utils.url_utils import extract_arxiv_id_from_url as common_extract
693
693
  return common_extract(url)
694
694
 
695
695
  def extract_year_from_text(text):
@@ -2141,7 +2141,7 @@ def compare_authors(cited_authors: list, correct_authors: list, normalize_func=N
2141
2141
  # and not penalize for the authoritative source having more authors
2142
2142
  if has_et_al:
2143
2143
  # Import here to avoid circular imports
2144
- from utils.error_utils import format_author_mismatch
2144
+ from refchecker.utils.error_utils import format_author_mismatch
2145
2145
  # For et al cases, check if each cited author matches ANY author in the correct list
2146
2146
  # rather than comparing positionally, since author order can vary
2147
2147
  for i, cited_author in enumerate(cleaned_cited):
@@ -2175,21 +2175,21 @@ def compare_authors(cited_authors: list, correct_authors: list, normalize_func=N
2175
2175
 
2176
2176
  # Check if cited authors look like parsing fragments
2177
2177
  if looks_like_fragments(cleaned_cited):
2178
- from utils.error_utils import format_author_count_mismatch
2178
+ from refchecker.utils.error_utils import format_author_count_mismatch
2179
2179
  display_cited = [format_author_for_display(author) for author in cleaned_cited]
2180
2180
  error_msg = format_author_count_mismatch(len(cleaned_cited), len(correct_names), display_cited, correct_names)
2181
2181
  return False, error_msg
2182
2182
 
2183
2183
  # For all count mismatches, show the count mismatch error
2184
2184
  if len(cleaned_cited) < len(correct_names):
2185
- from utils.error_utils import format_author_count_mismatch
2185
+ from refchecker.utils.error_utils import format_author_count_mismatch
2186
2186
  display_cited = [format_author_for_display(author) for author in cleaned_cited]
2187
2187
  error_msg = format_author_count_mismatch(len(cleaned_cited), len(correct_names), display_cited, correct_names)
2188
2188
  return False, error_msg
2189
2189
 
2190
2190
  # For cases where cited > correct, also show count mismatch
2191
2191
  elif len(cleaned_cited) > len(correct_names):
2192
- from utils.error_utils import format_author_count_mismatch
2192
+ from refchecker.utils.error_utils import format_author_count_mismatch
2193
2193
  display_cited = [format_author_for_display(author) for author in cleaned_cited]
2194
2194
  error_msg = format_author_count_mismatch(len(cleaned_cited), len(correct_names), display_cited, correct_names)
2195
2195
  return False, error_msg
@@ -2198,7 +2198,7 @@ def compare_authors(cited_authors: list, correct_authors: list, normalize_func=N
2198
2198
  comparison_correct = correct_names
2199
2199
 
2200
2200
  # Use shared three-line formatter (imported lazily to avoid circular imports)
2201
- from utils.error_utils import format_first_author_mismatch, format_author_mismatch
2201
+ from refchecker.utils.error_utils import format_first_author_mismatch, format_author_mismatch
2202
2202
 
2203
2203
  # Compare first author (most important) using the enhanced name matching
2204
2204
  if comparison_cited and comparison_correct:
@@ -2806,7 +2806,7 @@ def filter_bibtex_by_cited_keys(bib_content, cited_keys):
2806
2806
  return bib_content
2807
2807
 
2808
2808
  # Parse entries and filter
2809
- from utils.bibtex_parser import parse_bibtex_entries
2809
+ from refchecker.utils.bibtex_parser import parse_bibtex_entries
2810
2810
  entries = parse_bibtex_entries(bib_content)
2811
2811
  filtered_entries = []
2812
2812
 
@@ -3118,7 +3118,7 @@ def extract_latex_references(text, file_path=None): # pylint: disable=unused-ar
3118
3118
 
3119
3119
  if format_info['format_type'] == 'bibtex':
3120
3120
  # Use the dedicated BibTeX parser for consistent results
3121
- from utils.bibtex_parser import parse_bibtex_references
3121
+ from refchecker.utils.bibtex_parser import parse_bibtex_references
3122
3122
  return parse_bibtex_references(text)
3123
3123
 
3124
3124
  elif format_info['format_type'] == 'thebibliography':
@@ -3322,7 +3322,7 @@ def extract_latex_references(text, file_path=None): # pylint: disable=unused-ar
3322
3322
  # Extract URL if present
3323
3323
  url_match = re.search(r'\\url\{([^}]+)\}', content)
3324
3324
  if url_match:
3325
- from utils.url_utils import clean_url_punctuation
3325
+ from refchecker.utils.url_utils import clean_url_punctuation
3326
3326
  ref['url'] = clean_url_punctuation(url_match.group(1))
3327
3327
 
3328
3328
  # Extract title from \showarticletitle{} or \bibinfo{title}{}
@@ -3384,7 +3384,7 @@ def extract_latex_references(text, file_path=None): # pylint: disable=unused-ar
3384
3384
  if not ref['url']:
3385
3385
  url_match = re.search(r'\\url\{([^}]+)\}', content)
3386
3386
  if url_match:
3387
- from utils.url_utils import clean_url_punctuation
3387
+ from refchecker.utils.url_utils import clean_url_punctuation
3388
3388
  ref['url'] = clean_url_punctuation(url_match.group(1))
3389
3389
 
3390
3390
  # Extract DOI from \href{https://doi.org/...}
@@ -102,7 +102,9 @@ def construct_semantic_scholar_url(paper_id: str) -> str:
102
102
  Construct a Semantic Scholar URL from a paper ID.
103
103
 
104
104
  Args:
105
- paper_id: Semantic Scholar paper ID
105
+ paper_id: Semantic Scholar paper ID (SHA hash, NOT CorpusId)
106
+ The paperId is the 40-character hex hash that works in web URLs.
107
+ CorpusId (numeric) does NOT work in web URLs.
106
108
 
107
109
  Returns:
108
110
  Full Semantic Scholar URL
@@ -151,7 +153,7 @@ def construct_pubmed_url(pmid: str) -> str:
151
153
  return f"https://pubmed.ncbi.nlm.nih.gov/{clean_pmid}/"
152
154
 
153
155
 
154
- def get_best_available_url(external_ids: dict, open_access_pdf: Optional[str] = None) -> Optional[str]:
156
+ def get_best_available_url(external_ids: dict, open_access_pdf: Optional[str] = None, paper_id: Optional[str] = None) -> Optional[str]:
155
157
  """
156
158
  Get the best available URL from a paper's external IDs and open access information.
157
159
  Priority: Open Access PDF > DOI > ArXiv > Semantic Scholar > OpenAlex > PubMed
@@ -159,6 +161,7 @@ def get_best_available_url(external_ids: dict, open_access_pdf: Optional[str] =
159
161
  Args:
160
162
  external_ids: Dictionary of external identifiers
161
163
  open_access_pdf: Open access PDF URL if available
164
+ paper_id: Semantic Scholar paperId (SHA hash) if available
162
165
 
163
166
  Returns:
164
167
  Best available URL or None if no valid URL found
@@ -175,9 +178,9 @@ def get_best_available_url(external_ids: dict, open_access_pdf: Optional[str] =
175
178
  if external_ids.get('ArXiv'):
176
179
  return construct_arxiv_url(external_ids['ArXiv'])
177
180
 
178
- # Priority 4: Semantic Scholar URL
179
- if external_ids.get('CorpusId'):
180
- return construct_semantic_scholar_url(external_ids['CorpusId'])
181
+ # Priority 4: Semantic Scholar URL (using paperId, not CorpusId)
182
+ if paper_id:
183
+ return construct_semantic_scholar_url(paper_id)
181
184
 
182
185
  # Priority 5: OpenAlex URL
183
186
  if external_ids.get('OpenAlex'):
__version__.py DELETED
@@ -1,3 +0,0 @@
1
- """Version information for RefChecker."""
2
-
3
- __version__ = "1.2.54"
@@ -1,47 +0,0 @@
1
- __version__.py,sha256=vM_IldCgy0_CRZZSq08SaUYCl8sETF9Jyq8WJRWVIuA,65
2
- academic_refchecker-1.2.54.dist-info/licenses/LICENSE,sha256=Kwrx3fePVCeEFDCZvCW4OuoTNBiSoYbpGBI6qzGhWF0,1067
3
- checkers/__init__.py,sha256=T0PAHTFt6UiGvn-WGoJU8CdhXNmf6zaHmcGVoWHhmJQ,533
4
- checkers/crossref.py,sha256=cLYmSzE8ehJ5sNko_R3fEiGBGiPH5_HxLhFM-pCfDRM,20378
5
- checkers/enhanced_hybrid_checker.py,sha256=rbXkzpNkd0bn4e2OooX-CcdGTwwYpgmVaFvX_xCAFsA,27777
6
- checkers/github_checker.py,sha256=BXJaBC3AloKze04j8EcQz0a79EhtVoi9_871ilV7t60,14233
7
- checkers/local_semantic_scholar.py,sha256=D8py8-yMCgN1lvhXCiMUOEA4wBkH7AQvrkM4-3LCDsU,21015
8
- checkers/openalex.py,sha256=Fbc7iscZzmXjAZxH32PDX2r2Nwo9b5Ku-Sh1Ut9KpLA,19550
9
- checkers/openreview_checker.py,sha256=3ckn6U7TN5nQBjqPacr8W8mm2uMo6aWWB6gsxTDNCPk,40452
10
- checkers/pdf_paper_checker.py,sha256=L5HRHd3xpo0xDltZGTAA-Wk_arIS9bQV8ITeuxW0bNc,19893
11
- checkers/semantic_scholar.py,sha256=wk6e8DkYJM_O2nWsi-6EfJT53PzfL8KCmX1rS562KKc,34962
12
- checkers/webpage_checker.py,sha256=REOotx7Qka86_xbOIMeYj5YVb9D1RVMb4Ye311-28cA,43620
13
- config/__init__.py,sha256=r7sONsX2-ITviUJRU1KEz76uAuTRqZlzU-TVkvFRGYY,15
14
- config/logging.conf,sha256=r1tP0ApLHtlz7rV-oKS1MVO7oXJOgahbZFTtYmKnf9U,687
15
- config/settings.py,sha256=-vODFoXbWbGPUElpmchE5zbCj_n4Vtxr8HU1hQDFp_c,6164
16
- core/__init__.py,sha256=1T2MSQyDk0u_PupbHvm4CvNNN--dxsw78fqKUrqoYrM,157
17
- core/db_connection_pool.py,sha256=XRiOdehikkSz3obH4WKgf8woa3694if50Q15rBT-4XQ,4697
18
- core/parallel_processor.py,sha256=cq_WfzXrF2EI6IKOtJd6_QcwvM1xT3J6a13teg-wSbM,17638
19
- core/refchecker.py,sha256=-QIT5eUQaPCuQy7S80sXCvtrmcjdH5lf5wdZvsPQO9w,286416
20
- database/__init__.py,sha256=mEuVHlEBuS44t_2ZT_JnvQQrlRCjo1SJq1NmaJ6r8OY,125
21
- database/download_semantic_scholar_db.py,sha256=waN4I97KC_36YMiPbiBDUUmgfzu1nub5yeKdAsIR2aw,75276
22
- llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
23
- llm/base.py,sha256=uMF-KOqZ9ZQ7rccOQLpKJiW9sEMMxr7ePXBSF0yYDJY,16782
24
- llm/providers.py,sha256=A0usJpprCO5D-VX0hqaQzBfi4DG3rdjA39vu02XJsGw,40092
25
- scripts/__init__.py,sha256=xJwo6afG8s7S888BK2Bxw2d7FX8aLkbl0l_ZoJOFibE,37
26
- scripts/start_vllm_server.py,sha256=ZepWp2y2cKFW0Kgsoima2RbmF02fTU29UFcLLpsBhFU,4213
27
- services/__init__.py,sha256=jGi9S74Msak3YR-C4Qb68VU7HB4oLaX9o1rlVAFpOFI,187
28
- services/pdf_processor.py,sha256=vu_JnhFGZY6jFVbDbPvG-mlQojvB-3Dzc8_946KVV2E,9427
29
- utils/__init__.py,sha256=1RrGoIIn1_gVzxd56b6a7HeAS-wu7uDP-nxLbR3fJ-8,1199
30
- utils/arxiv_utils.py,sha256=EzH1PhEAW0df5mmSP-kKHmuwqd4u2CSotRNwQ5IMJx8,19766
31
- utils/author_utils.py,sha256=DLTo1xsxef2wxoe4s_MWrh36maj4fgnvFlsDLpDE-qQ,5507
32
- utils/biblatex_parser.py,sha256=OkHXQcjiBrEDuhBfEk0RtmAYxufu5lAxAjb8__DzMjI,25537
33
- utils/bibliography_utils.py,sha256=mpmdAklzAs1CT3gqrOcjujGhouL95OuliCx0LE9Pg90,11705
34
- utils/bibtex_parser.py,sha256=a89NLy_q2kwED4QFJgxWFgPQOJBV73bIUL3RS_Urmro,15231
35
- utils/config_validator.py,sha256=rxf7K3DYmJ-BNPsmtaCNipY2BTVT-pJZ7wN-M9Y3GC8,11167
36
- utils/db_utils.py,sha256=_wSupfBlm0ILFvntQTvoj7tLDCbrYPRQrp9NDvphF_E,6281
37
- utils/doi_utils.py,sha256=ezUiRnYRpoO0U_Rqgxv1FxqmeTwPh6X8gLgSDbqg5sY,4874
38
- utils/error_utils.py,sha256=UJOH7Bp-rPV2JDY_XN38I2pSkqqPdnQoviKa4s4nK_A,12501
39
- utils/mock_objects.py,sha256=QxU-UXyHSY27IZYN8Sb8ei0JtNkpGSdMXoErrRLHXvE,6437
40
- utils/text_utils.py,sha256=KLFn8tMahx1CS_v7pbR3Phq1dGrFrTPrYmVtEw70Ps4,220868
41
- utils/unicode_utils.py,sha256=-WBKarXO756p7fd7gCeNsMag4ztDNURwFX5IVniOtwY,10366
42
- utils/url_utils.py,sha256=HdxIO8QvciP6Jp8Wd4sTSrS8JQrOMwgM7pxdUC8RJb4,9176
43
- academic_refchecker-1.2.54.dist-info/METADATA,sha256=m_vnHyC7a_B8gTjeybU5tnvMgrstj-GaRtFh4crZxHk,23256
44
- academic_refchecker-1.2.54.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
45
- academic_refchecker-1.2.54.dist-info/entry_points.txt,sha256=WdI89tYkIfz-M628PiboOfOLzTBWZAqvlF29qCVCkek,61
46
- academic_refchecker-1.2.54.dist-info/top_level.txt,sha256=6RlcQEA0kHb7-ndbKMFMZnYnJQVohgsU6BBkbEvJvEs,69
47
- academic_refchecker-1.2.54.dist-info/RECORD,,
@@ -1,2 +0,0 @@
1
- [console_scripts]
2
- academic-refchecker = core.refchecker:main
@@ -1,9 +0,0 @@
1
- __version__
2
- checkers
3
- config
4
- core
5
- database
6
- llm
7
- scripts
8
- services
9
- utils
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes