academic-refchecker 2.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. academic_refchecker-2.0.7.dist-info/METADATA +738 -0
  2. academic_refchecker-2.0.7.dist-info/RECORD +64 -0
  3. academic_refchecker-2.0.7.dist-info/WHEEL +5 -0
  4. academic_refchecker-2.0.7.dist-info/entry_points.txt +3 -0
  5. academic_refchecker-2.0.7.dist-info/licenses/LICENSE +21 -0
  6. academic_refchecker-2.0.7.dist-info/top_level.txt +2 -0
  7. backend/__init__.py +21 -0
  8. backend/__main__.py +11 -0
  9. backend/cli.py +64 -0
  10. backend/concurrency.py +100 -0
  11. backend/database.py +711 -0
  12. backend/main.py +1367 -0
  13. backend/models.py +99 -0
  14. backend/refchecker_wrapper.py +1126 -0
  15. backend/static/assets/index-2P6L_39v.css +1 -0
  16. backend/static/assets/index-hk21nqxR.js +25 -0
  17. backend/static/favicon.svg +6 -0
  18. backend/static/index.html +15 -0
  19. backend/static/vite.svg +1 -0
  20. backend/thumbnail.py +517 -0
  21. backend/websocket_manager.py +104 -0
  22. refchecker/__init__.py +13 -0
  23. refchecker/__main__.py +11 -0
  24. refchecker/__version__.py +3 -0
  25. refchecker/checkers/__init__.py +17 -0
  26. refchecker/checkers/crossref.py +541 -0
  27. refchecker/checkers/enhanced_hybrid_checker.py +563 -0
  28. refchecker/checkers/github_checker.py +326 -0
  29. refchecker/checkers/local_semantic_scholar.py +540 -0
  30. refchecker/checkers/openalex.py +513 -0
  31. refchecker/checkers/openreview_checker.py +984 -0
  32. refchecker/checkers/pdf_paper_checker.py +493 -0
  33. refchecker/checkers/semantic_scholar.py +764 -0
  34. refchecker/checkers/webpage_checker.py +938 -0
  35. refchecker/config/__init__.py +1 -0
  36. refchecker/config/logging.conf +36 -0
  37. refchecker/config/settings.py +170 -0
  38. refchecker/core/__init__.py +7 -0
  39. refchecker/core/db_connection_pool.py +141 -0
  40. refchecker/core/parallel_processor.py +415 -0
  41. refchecker/core/refchecker.py +5838 -0
  42. refchecker/database/__init__.py +6 -0
  43. refchecker/database/download_semantic_scholar_db.py +1725 -0
  44. refchecker/llm/__init__.py +0 -0
  45. refchecker/llm/base.py +376 -0
  46. refchecker/llm/providers.py +911 -0
  47. refchecker/scripts/__init__.py +1 -0
  48. refchecker/scripts/start_vllm_server.py +121 -0
  49. refchecker/services/__init__.py +8 -0
  50. refchecker/services/pdf_processor.py +268 -0
  51. refchecker/utils/__init__.py +27 -0
  52. refchecker/utils/arxiv_utils.py +462 -0
  53. refchecker/utils/author_utils.py +179 -0
  54. refchecker/utils/biblatex_parser.py +584 -0
  55. refchecker/utils/bibliography_utils.py +332 -0
  56. refchecker/utils/bibtex_parser.py +411 -0
  57. refchecker/utils/config_validator.py +262 -0
  58. refchecker/utils/db_utils.py +210 -0
  59. refchecker/utils/doi_utils.py +190 -0
  60. refchecker/utils/error_utils.py +482 -0
  61. refchecker/utils/mock_objects.py +211 -0
  62. refchecker/utils/text_utils.py +5057 -0
  63. refchecker/utils/unicode_utils.py +335 -0
  64. refchecker/utils/url_utils.py +307 -0
@@ -0,0 +1,17 @@
+ """
+ Reference checker implementations for different sources
+ """
+
+ from .semantic_scholar import NonArxivReferenceChecker
+ from .local_semantic_scholar import LocalNonArxivReferenceChecker
+ from .enhanced_hybrid_checker import EnhancedHybridReferenceChecker
+ from .openalex import OpenAlexReferenceChecker
+ from .crossref import CrossRefReferenceChecker
+
+ __all__ = [
+     "NonArxivReferenceChecker",
+     "LocalNonArxivReferenceChecker",
+     "EnhancedHybridReferenceChecker",
+     "OpenAlexReferenceChecker",
+     "CrossRefReferenceChecker"
+ ]
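Note: the exports above make each checker importable from the package namespace. A minimal sketch for the CrossRef checker, whose constructor appears later in this diff (the other checkers' signatures are not part of this file and may differ):

    from refchecker.checkers import CrossRefReferenceChecker

    checker = CrossRefReferenceChecker(email="you@example.org")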
@@ -0,0 +1,541 @@
+ #!/usr/bin/env python3
+ """
+ CrossRef API Client for Reference Verification
+
+ This module provides functionality to verify references using the CrossRef API.
+ CrossRef maintains metadata for over 165 million research outputs from 20,000+ members
+ and is particularly strong for publisher-registered content with DOIs.
+
+ Usage:
+     from crossref import CrossRefReferenceChecker
+
+     # Initialize the checker
+     checker = CrossRefReferenceChecker(email="your@email.com")  # Email for polite pool
+
+     # Verify a reference
+     reference = {
+         'title': 'Title of the paper',
+         'authors': ['Author 1', 'Author 2'],
+         'year': 2020,
+         'doi': '10.1000/xyz123',
+         'raw_text': 'Full citation text'
+     }
+
+     verified_data, errors, url = checker.verify_reference(reference)
+ """
+
+ import requests
+ import time
+ import logging
+ import re
+ from typing import Dict, List, Tuple, Optional, Any, Union
+ from urllib.parse import quote_plus
+ from refchecker.utils.text_utils import normalize_text, clean_title_basic, find_best_match, is_name_match, compare_authors, clean_title_for_search
+ from refchecker.utils.error_utils import format_year_mismatch, format_doi_mismatch
+ from refchecker.config.settings import get_config
+
+ # Set up logging
+ logger = logging.getLogger(__name__)
+
+ # Get configuration
+ config = get_config()
+ SIMILARITY_THRESHOLD = config["text_processing"]["similarity_threshold"]
+
+ class CrossRefReferenceChecker:
+     """
+     A class to verify references using the CrossRef API
+     """
+
+     def __init__(self, email: Optional[str] = None):
+         """
+         Initialize the CrossRef API client
+
+         Args:
+             email: Optional email for polite pool access (better performance)
+         """
+         self.base_url = "https://api.crossref.org"
+         self.headers = {
+             "Accept": "application/json",
+             "User-Agent": "RefChecker/1.0.0 (https://github.com/markrussinovich/refchecker)"
+         }
+
+         # Add email to headers for polite pool access
+         if email:
+             self.headers["User-Agent"] += f"; mailto:{email}"
+
+         # Rate limiting parameters - CrossRef has variable rate limits
+         self.request_delay = 0.05  # 50ms between requests (20 req/sec, conservative)
+         self.max_retries = 3
+         self.backoff_factor = 2
+
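Note: with an email supplied, the polite-pool contact is appended to the User-Agent header rather than sent as a separate mailto parameter. Traced directly from the constructor above:

    checker = CrossRefReferenceChecker(email="you@example.org")
    # checker.headers["User-Agent"] ==
    #   "RefChecker/1.0.0 (https://github.com/markrussinovich/refchecker); mailto:you@example.org"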
+     def search_works(self, query: str, year: Optional[int] = None, limit: int = 5) -> List[Dict[str, Any]]:
+         """
+         Search for works matching the query
+
+         Args:
+             query: Search query (title, authors, etc.)
+             year: Publication year to filter by
+             limit: Maximum number of results to return
+
+         Returns:
+             List of work data dictionaries
+         """
+         endpoint = f"{self.base_url}/works"
+
+         params = {
+             "query": query,
+             "rows": min(limit, 20),  # Limit for performance
+             "select": "DOI,title,author,published,publisher,container-title,type,URL,link,abstract,subject"
+         }
+
+         # Add year filter if provided
+         if year:
+             params["filter"] = f"from-pub-date:{year},until-pub-date:{year}"
+
+         # Make the request with retries and backoff
+         for attempt in range(self.max_retries):
+             try:
+                 # Add delay to respect rate limits
+                 time.sleep(self.request_delay)
+
+                 response = requests.get(endpoint, headers=self.headers, params=params, timeout=30)
+
+                 # Check for rate limiting
+                 if response.status_code == 429:
+                     # Check if rate limit info is in headers
+                     retry_after = response.headers.get('Retry-After')
+                     if retry_after:
+                         wait_time = int(retry_after) + 1
+                     else:
+                         wait_time = self.request_delay * (self.backoff_factor ** attempt) + 1
+
+                     logger.debug(f"CrossRef rate limit exceeded. Retrying in {wait_time} seconds...")
+                     time.sleep(wait_time)
+                     continue
+
+                 # Check for other errors
+                 response.raise_for_status()
+
+                 # Parse the response
+                 data = response.json()
+                 results = data.get('message', {}).get('items', [])
+
+                 logger.debug(f"CrossRef search returned {len(results)} results for query: {query[:50]}...")
+                 return results
+
+             except requests.exceptions.RequestException as e:
+                 wait_time = self.request_delay * (self.backoff_factor ** attempt) + 1
+                 logger.debug(f"CrossRef request failed: {str(e)}. Retrying in {wait_time:.2f} seconds...")
+                 time.sleep(wait_time)
+
+         # If we get here, all retries failed
+         logger.warning(f"Failed to search CrossRef after {self.max_retries} attempts")
+         return []
+
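Note: a minimal sketch of a call against the /works endpoint as wired above (the query string is an example only; the returned fields follow the 'select' list in params, and 'title' is a list in raw CrossRef records):

    results = checker.search_works("attention is all you need", year=2017, limit=5)
    for work in results:
        print(work.get("DOI"), work.get("title", [""])[0])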
+     def get_work_by_doi(self, doi: str) -> Optional[Dict[str, Any]]:
+         """
+         Get work data by DOI
+
+         Args:
+             doi: DOI of the work
+
+         Returns:
+             Work data dictionary or None if not found
+         """
+         # Clean DOI - strip known prefixes (checked against the already-cleaned value
+         # so a combined prefix such as "doi:https://doi.org/..." is also handled)
+         clean_doi = doi
+         if clean_doi.startswith('doi:'):
+             clean_doi = clean_doi[len('doi:'):]
+         if clean_doi.startswith('https://doi.org/'):
+             clean_doi = clean_doi[len('https://doi.org/'):]
+         if clean_doi.startswith('http://doi.org/'):
+             clean_doi = clean_doi[len('http://doi.org/'):]
+
+         endpoint = f"{self.base_url}/works/{clean_doi}"
+
+         # Note: the individual DOI endpoint does not support the 'select' parameter;
+         # it returns all fields by default, which is what we want
+         params = {}
+
+         # Make the request with retries and backoff
+         for attempt in range(self.max_retries):
+             try:
+                 # Add delay to respect rate limits
+                 time.sleep(self.request_delay)
+
+                 response = requests.get(endpoint, headers=self.headers, params=params, timeout=30)
+
+                 # Check for rate limiting
+                 if response.status_code == 429:
+                     retry_after = response.headers.get('Retry-After')
+                     if retry_after:
+                         wait_time = int(retry_after) + 1
+                     else:
+                         wait_time = self.request_delay * (self.backoff_factor ** attempt) + 1
+
+                     logger.debug(f"CrossRef rate limit exceeded. Retrying in {wait_time} seconds...")
+                     time.sleep(wait_time)
+                     continue
+
+                 # If not found, return None
+                 if response.status_code == 404:
+                     logger.debug(f"Work with DOI {doi} not found in CrossRef")
+                     return None
+
+                 # Check for other errors
+                 response.raise_for_status()
+
+                 # Parse the response
+                 data = response.json()
+                 work_data = data.get('message', {})
+                 logger.debug(f"Found work by DOI in CrossRef: {doi}")
+                 return work_data
+
+             except requests.exceptions.RequestException as e:
+                 wait_time = self.request_delay * (self.backoff_factor ** attempt) + 1
+                 logger.warning(f"CrossRef request failed: {str(e)}. Retrying in {wait_time:.2f} seconds...")
+                 time.sleep(wait_time)
+
+         # If we get here, all retries failed
+         logger.error(f"Failed to get work by DOI from CrossRef after {self.max_retries} attempts")
+         return None
+
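Note: because the prefix stripping above accepts full DOI URLs as well as bare DOIs, both forms below resolve to the same endpoint (the DOI value is illustrative only):

    work = checker.get_work_by_doi("10.1000/xyz123")
    same = checker.get_work_by_doi("https://doi.org/10.1000/xyz123")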
+     def extract_doi_from_url(self, url: str) -> Optional[str]:
+         """
+         Extract DOI from a URL
+
+         Args:
+             url: URL that might contain a DOI
+
+         Returns:
+             Extracted DOI or None if not found
+         """
+         if not url:
+             return None
+
+         # Only extract DOIs from actual DOI URLs, not from other domains
+         # This prevents false positives from URLs like aclanthology.org
+         if 'doi.org' not in url and 'doi:' not in url:
+             return None
+
+         # Check if it's a DOI URL
+         doi_patterns = [
+             r'doi\.org/([^/\s\?#]+(?:/[^/\s\?#]+)*)',  # Full DOI pattern
+             r'doi:([^/\s\?#]+(?:/[^/\s\?#]+)*)',       # doi: prefix
+         ]
+
+         for pattern in doi_patterns:
+             match = re.search(pattern, url)
+             if match:
+                 doi_candidate = match.group(1)
+                 # DOIs must start with "10." and have at least one slash
+                 if doi_candidate.startswith('10.') and '/' in doi_candidate and len(doi_candidate) > 6:
+                     return doi_candidate
+
+         return None
+
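Note: inputs and outputs traced from the guard and patterns above (the URLs are examples, not fixtures):

    checker.extract_doi_from_url("https://doi.org/10.1000/xyz123")    # -> "10.1000/xyz123"
    checker.extract_doi_from_url("doi:10.1000/xyz123")                # -> "10.1000/xyz123"
    checker.extract_doi_from_url("https://aclanthology.org/P19-1285") # -> None (non-DOI domain)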
+     def normalize_author_name(self, name: str) -> str:
+         """
+         Normalize author name for comparison
+
+         Args:
+             name: Author name
+
+         Returns:
+             Normalized name
+         """
+         # Remove reference numbers (e.g., "[1]")
+         name = re.sub(r'^\[\d+\]', '', name)
+
+         # Use common normalization function
+         return normalize_text(name)
+
+     def compare_authors(self, cited_authors: List[str], crossref_authors: List[Dict[str, Any]]) -> Tuple[bool, str]:
+         """
+         Compare author lists to check if they match (delegates to shared utility)
+
+         Args:
+             cited_authors: List of author names as cited
+             crossref_authors: List of author data from CrossRef
+
+         Returns:
+             Tuple of (match_result, error_message)
+         """
+         # Extract author names from CrossRef data for the shared utility
+         author_dicts = []
+         for author in crossref_authors:
+             # CrossRef author format: {"given": "First", "family": "Last", "name": "Full Name"}
+             name = None
+             if 'name' in author:
+                 name = author['name']
+             elif 'given' in author and 'family' in author:
+                 name = f"{author['given']} {author['family']}"
+             elif 'family' in author:
+                 name = author['family']
+
+             if name:
+                 author_dicts.append({'name': name})
+
+         return compare_authors(cited_authors, author_dicts)
+
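Note: a sketch of the conversion performed above before delegating; the final verdict comes from the shared compare_authors utility in text_utils, whose scoring is not part of this file:

    crossref_authors = [
        {"given": "Ashish", "family": "Vaswani"},
        {"name": "Noam Shazeer"},
    ]
    match, error = checker.compare_authors(["Ashish Vaswani", "Noam Shazeer"], crossref_authors)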
+     def is_name_match(self, name1: str, name2: str) -> bool:
+         """
+         Check if two author names match, allowing for variations
+
+         Args:
+             name1: First author name (normalized)
+             name2: Second author name (normalized)
+
+         Returns:
+             True if names match, False otherwise
+         """
+         # Exact match
+         if name1 == name2:
+             return True
+
+         # If one is a substring of the other, consider it a match
+         if name1 in name2 or name2 in name1:
+             return True
+
+         # Split into parts (first name, last name, etc.)
+         parts1 = name1.split()
+         parts2 = name2.split()
+
+         if not parts1 or not parts2:
+             return False
+
+         # If either name has only one part, compare directly
+         if len(parts1) == 1 or len(parts2) == 1:
+             return parts1[-1] == parts2[-1]  # Compare last parts (last names)
+
+         # Compare last names (last parts)
+         if parts1[-1] != parts2[-1]:
+             return False
+
+         # Compare first initials
+         if len(parts1[0]) > 0 and len(parts2[0]) > 0 and parts1[0][0] != parts2[0][0]:
+             return False
+
+         return True
+
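Note: behaviour of the fallback matcher above on already-normalized names, traced from the code rather than from tests:

    checker.is_name_match("a vaswani", "ashish vaswani")   # True: same last name, matching first initial
    checker.is_name_match("noam shazeer", "n shazeer")     # True: same last name, matching first initial
    checker.is_name_match("noam shazeer", "naomi shazeer") # True: initials collide (known looseness)
    checker.is_name_match("noam shazeer", "noam parmar")   # False: last names differ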
+     def extract_year_from_published(self, published: Dict[str, List[List[int]]]) -> Optional[int]:
+         """
+         Extract year from CrossRef published date
+
+         Args:
+             published: Published date object from CrossRef
+
+         Returns:
+             Publication year or None
+         """
+         if not published:
+             return None
+
+         # CrossRef date format: {"date-parts": [[2017, 6, 12]]}
+         date_parts = published.get('date-parts', [])
+         if date_parts and len(date_parts) > 0 and len(date_parts[0]) > 0:
+             return date_parts[0][0]  # First element is the year
+
+         return None
+
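Note: examples traced from the date handling above, using the shape shown in the code comment:

    checker.extract_year_from_published({"date-parts": [[2017, 6, 12]]})  # -> 2017
    checker.extract_year_from_published({"date-parts": [[]]})             # -> None (empty parts)
    checker.extract_year_from_published(None)                             # -> None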
+     def extract_url_from_work(self, work_data: Dict[str, Any]) -> Optional[str]:
+         """
+         Extract the best URL from CrossRef work data
+
+         Args:
+             work_data: Work data from CrossRef
+
+         Returns:
+             Best available URL or None
+         """
+         # Priority order: direct URL, DOI URL, link URLs
+
+         # Check for direct URL
+         if work_data.get('URL'):
+             logger.debug(f"Found direct URL: {work_data['URL']}")
+             return work_data['URL']
+
+         # Check for DOI
+         doi = work_data.get('DOI')
+         if doi:
+             from refchecker.utils.doi_utils import construct_doi_url
+             doi_url = construct_doi_url(doi)
+             logger.debug(f"Generated DOI URL: {doi_url}")
+             return doi_url
+
+         # Check link arrays for URLs
+         links = work_data.get('link', [])
+         for link in links:
+             if isinstance(link, dict) and link.get('URL'):
+                 logger.debug(f"Found link URL: {link['URL']}")
+                 return link['URL']
+
+         logger.debug("No URL found in CrossRef work data")
+         return None
+
+     def verify_reference(self, reference: Dict[str, Any]) -> Tuple[Optional[Dict[str, Any]], List[Dict[str, Any]], Optional[str]]:
+         """
+         Verify a reference using CrossRef
+
+         Args:
+             reference: Reference data dictionary
+
+         Returns:
+             Tuple of (verified_data, errors, url)
+             - verified_data: Work data from CrossRef or None if not found
+             - errors: List of error dictionaries
+             - url: URL of the work if found, None otherwise
+         """
+         errors = []
+
+         # Extract reference data
+         title = reference.get('title', '')
+         authors = reference.get('authors', [])
+         year = reference.get('year', 0)
+         url = reference.get('url', '')
+         raw_text = reference.get('raw_text', '')
+
+         # If we have a DOI, try to get the work directly
+         doi = None
+         if 'doi' in reference and reference['doi']:
+             doi = reference['doi']
+         elif url:
+             doi = self.extract_doi_from_url(url)
+
+         work_data = None
+
+         if doi:
+             # Try to get the work by DOI
+             work_data = self.get_work_by_doi(doi)
+
+             if work_data:
+                 logger.debug(f"Found work by DOI in CrossRef: {doi}")
+             else:
+                 logger.debug(f"Could not find work with DOI in CrossRef: {doi}")
+
+         # If we couldn't get the work by DOI, try searching by title
+         if not work_data and title:
+             # Clean up the title for search using centralized utility function
+             cleaned_title = clean_title_for_search(title)
+
+             # Search for the work
+             search_results = self.search_works(cleaned_title, year)
+
+             # Process search results for CrossRef format
+             processed_results = []
+             for result in search_results:
+                 # CrossRef title format: ["Title of the Paper"]
+                 result_titles = result.get('title', [])
+                 if result_titles:
+                     result_title = result_titles[0] if isinstance(result_titles, list) else str(result_titles)
+                     # Create a normalized result for the utility function
+                     processed_result = dict(result)
+                     processed_result['title'] = result_title
+                     processed_result['publication_year'] = self.extract_year_from_published(result.get('published'))
+                     processed_results.append(processed_result)
+
+             if processed_results:
+                 best_match, best_score = find_best_match(processed_results, cleaned_title, year, authors)
+
+                 # Use match if score is good enough
+                 if best_match and best_score >= SIMILARITY_THRESHOLD:
+                     work_data = best_match
+                     logger.debug(f"Found work by title in CrossRef with score {best_score:.2f}: {cleaned_title}")
+                 else:
+                     logger.debug(f"No good title match found in CrossRef (best score: {best_score:.2f})")
+             else:
+                 logger.debug(f"No works found for title in CrossRef: {cleaned_title}")
+
+         # If we still couldn't find the work, return no verification
+         if not work_data:
+             logger.debug("Could not find matching work in CrossRef")
+             return None, [], None
+
+         # Verify authors
+         if authors:
+             crossref_authors = work_data.get('author', [])
+             authors_match, author_error = self.compare_authors(authors, crossref_authors)
+
+             if not authors_match:
+                 # Extract correct author names for error reporting
+                 correct_author_names = []
+                 for author in crossref_authors:
+                     if 'name' in author:
+                         correct_author_names.append(author['name'])
+                     elif 'given' in author and 'family' in author:
+                         full_name = f"{author['given']} {author['family']}"
+                         correct_author_names.append(full_name)
+                     elif 'family' in author:
+                         correct_author_names.append(author['family'])
+
+                 errors.append({
+                     'error_type': 'author',
+                     'error_details': author_error,
+                     'ref_authors_correct': ', '.join(correct_author_names)
+                 })
+
+         # Verify year
+         work_year = self.extract_year_from_published(work_data.get('published'))
+         if year and work_year and year != work_year:
+             errors.append({
+                 'warning_type': 'year',
+                 'warning_details': format_year_mismatch(year, work_year),
+                 'ref_year_correct': work_year
+             })
+
+         # Verify DOI
+         work_doi = work_data.get('DOI')
+         if doi and work_doi:
+             # Compare DOIs using the proper comparison function
+             from refchecker.utils.doi_utils import compare_dois, validate_doi_resolves
+             if not compare_dois(doi, work_doi):
+                 # If cited DOI resolves, it's likely a valid alternate DOI (e.g., arXiv vs conference)
+                 # Treat as warning instead of error
+                 if validate_doi_resolves(doi):
+                     errors.append({
+                         'warning_type': 'doi',
+                         'warning_details': format_doi_mismatch(doi, work_doi),
+                         'ref_doi_correct': work_doi
+                     })
+                 else:
+                     errors.append({
+                         'error_type': 'doi',
+                         'error_details': format_doi_mismatch(doi, work_doi),
+                         'ref_doi_correct': work_doi
+                     })
+
+         # Extract URL from work data
+         work_url = self.extract_url_from_work(work_data)
+
+         return work_data, errors, work_url
+
+ if __name__ == "__main__":
+     # Example usage
+     checker = CrossRefReferenceChecker(email="test@example.com")
+
+     # Example reference
+     reference = {
+         'title': 'Attention is All You Need',
+         'authors': ['Ashish Vaswani', 'Noam Shazeer'],
+         'year': 2017,
+         'doi': '10.5555/3295222.3295349',
+         'raw_text': 'Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., ... & Polosukhin, I. (2017). Attention is all you need. Advances in neural information processing systems, 30.'
+     }
+
+     # Verify the reference
+     verified_data, errors, url = checker.verify_reference(reference)
+
+     if verified_data:
+         # 'title' is a list in raw CrossRef records but a plain string for
+         # results normalized during title search, so handle both cases
+         found_title = verified_data.get('title', 'Unknown')
+         if isinstance(found_title, list):
+             found_title = found_title[0] if found_title else 'Unknown'
+         print(f"Found work: {found_title}")
+         print(f"DOI: {verified_data.get('DOI', 'None')}")
+         print(f"URL: {url}")
+
+         if errors:
+             print("Errors found:")
+             for error in errors:
+                 error_type = error.get('error_type') or error.get('warning_type')
+                 print(f"  - {error_type}: {error.get('error_details') or error.get('warning_details')}")
+         else:
+             print("No errors found")
+     else:
+         print("Could not find matching work")