mdrefcheck 0.1.7__tar.gz → 0.1.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/.pre-commit-hooks.yaml +2 -1
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/Cargo.lock +1 -1
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/Cargo.toml +12 -1
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/PKG-INFO +2 -2
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/README.md +1 -1
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/pyproject.toml +1 -1
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/checks/email.rs +4 -4
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/checks/image.rs +3 -3
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/checks/section.rs +13 -15
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/checks.rs +1 -1
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/main.rs +2 -2
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/parser.rs +5 -1
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/scanner.rs +13 -8
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/utils.rs +7 -2
- mdrefcheck-0.1.7/release.toml +0 -5
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/.github/workflows/ci.yml +0 -0
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/.gitignore +0 -0
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/LICENSE +0 -0
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/rustfmt.toml +0 -0
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/config.rs +0 -0
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/diagnostics.rs +0 -0
- {mdrefcheck-0.1.7 → mdrefcheck-0.1.8}/src/lib.rs +0 -0
Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "mdrefcheck"
-version = "0.1.7"
+version = "0.1.8"
 edition = "2024"
 readme = "README.md"
 description = "A CLI tool to validate references in markdown files."
@@ -19,3 +19,14 @@ pathdiff = "0.2.3"
 pulldown-cmark = "0.13.0"
 regex = "1.11.2"
 walkdir = "2.5.0"
+
+[package.metadata.release]
+pre-release-replacements = [
+    { file = "pyproject.toml", search = 'version = "[a-z0-9\\.-]+"', replace = 'version = "{{version}}"' },
+    { file = "README.md", search = 'rev: v[a-z0-9\\.-]+', replace = 'rev: v{{version}}' },
+]
+pre-release-commit-message = "chore(release): prepare for {{version}}"
+
+[lints.clippy]
+pedantic = "warn"
+
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mdrefcheck
-Version: 0.1.7
+Version: 0.1.8
 Classifier: Development Status :: 4 - Beta
 Classifier: Environment :: Console
 Classifier: Intended Audience :: Developers
@@ -63,7 +63,7 @@ Add this to your `.pre-commit-config.yaml`:
 ```yaml
 repos:
   - repo: https://github.com/gospodima/mdrefcheck
-    rev: v0.1.7
+    rev: v0.1.8
     hooks:
       - id: mdrefcheck
 ```
src/checks/email.rs
@@ -1,14 +1,14 @@
 use regex::Regex;
 
 pub fn validate_email(email: &str) -> Result<(), String> {
-    if !is_valid_email(email) {
-        Err(format!("Invalid email: {}", email))
-    } else {
+    if is_valid_email(email) {
         Ok(())
+    } else {
+        Err(format!("Invalid email: {email}"))
     }
 }
 
-/// Email validation according to https://spec.commonmark.org/0.31.2/#email-address
+/// Email validation according to <https://spec.commonmark.org/0.31.2/#email-address>
 fn is_valid_email(s: &str) -> bool {
     static EMAIL_RE: &str = r"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$";
     Regex::new(EMAIL_RE).unwrap().is_match(s)
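Most of the source changes in this release track clippy's pedantic lints; the hunk above switches to inlined format arguments (the `uninlined_format_args` pattern). A minimal standalone sketch of that pattern, not code from the crate itself:

```rust
fn main() {
    let email = "user@example.com";
    // Positional argument, as in the old code.
    let positional = format!("Invalid email: {}", email);
    // Named capture in the format string, as in the new code (Rust 2021+).
    let inlined = format!("Invalid email: {email}");
    assert_eq!(positional, inlined);
    println!("{inlined}");
}
```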
src/checks/image.rs
@@ -10,9 +10,9 @@ pub fn validate_image(current_path: &Path, dest: &str) -> Result<(), String> {
         .unwrap_or_else(|| Path::new("."))
         .join(dest);
 
-    if !resolved.exists() {
-        Err(format!("Image not found: {}", dest))
-    } else {
+    if resolved.exists() {
         Ok(())
+    } else {
+        Err(format!("Image not found: {dest}"))
     }
 }
src/checks/section.rs
@@ -9,8 +9,7 @@ pub fn validate_section_link(
 ) -> Result<(), String> {
     let (file_part, heading_part) = dest
         .split_once('#')
-        .map(|(f, h)| (f, Some(h)))
-        .unwrap_or((dest, None));
+        .map_or((dest, None), |(f, h)| (f, Some(h)));
 
     let target_file = if file_part.is_empty() {
         current_path.to_path_buf()
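The rewrite above collapses `.map(...).unwrap_or(...)` into a single `.map_or(default, f)` call, as clippy's `map_unwrap_or` lint suggests. A small self-contained sketch of the same split-on-`#` logic, using a made-up helper name rather than the crate's types:

```rust
// Hypothetical helper mirroring the destination split in validate_section_link.
fn split_dest(dest: &str) -> (&str, Option<&str>) {
    dest.split_once('#')
        .map_or((dest, None), |(file, heading)| (file, Some(heading)))
}

fn main() {
    assert_eq!(split_dest("guide.md#usage"), ("guide.md", Some("usage")));
    assert_eq!(split_dest("guide.md"), ("guide.md", None));
    assert_eq!(split_dest("#intro"), ("", Some("intro")));
}
```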
@@ -20,24 +19,23 @@ pub fn validate_section_link(
             .unwrap_or_else(|| Path::new("."))
             .join(file_part);
         fs::canonicalize(&resolved)
-            .map_err(|_| format!("File not found: {}", file_part))?
+            .map_err(|_| format!("File not found: {file_part}"))?
     };
 
-    if let Some(heading) = heading_part
-
+    if let Some(heading) = heading_part
+        && !section_links
            .entry(target_file.clone())
            .or_insert_with(|| parser::parse_file_headings(&target_file).unwrap())
            .contains(heading)
-
-
-
-
-
-
-
-
-
-        }
+    {
+        return Err(format!(
+            "Missing heading #{heading}{}",
+            if file_part.is_empty() {
+                String::new()
+            } else {
+                format!(" in {file_part}")
+            }
+        ));
     }
 
     Ok(())
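The larger hunk above folds a nested `if let` plus inner `if` into a single let-chain (`if let ... && condition`), which the crate can rely on because Cargo.toml declares `edition = "2024"`. A minimal sketch of the construct with illustrative names, assuming a recent stable toolchain:

```rust
fn first_missing(headings: &[&str], wanted: Option<&str>) -> Option<String> {
    // One `if` both destructures the Option and checks the follow-up condition.
    if let Some(heading) = wanted
        && !headings.contains(&heading)
    {
        return Some(format!("Missing heading #{heading}"));
    }
    None
}

fn main() {
    assert_eq!(
        first_missing(&["intro", "usage"], Some("install")),
        Some("Missing heading #install".to_string())
    );
    assert_eq!(first_missing(&["intro"], Some("intro")), None);
    assert_eq!(first_missing(&["intro"], None), None);
}
```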
src/checks.rs
@@ -111,7 +111,7 @@ fn check_inline(
     validate_section_link(current_path, dest, doc_headings)
 }
 
-fn to_exclude(dest: &str, exclude_link_regexes: &Vec<Regex>) -> bool {
+fn to_exclude(dest: &str, exclude_link_regexes: &[Regex]) -> bool {
     exclude_link_regexes
         .iter()
         .any(|re| re.is_match(dest.as_ref()))
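Taking `&[Regex]` instead of `&Vec<Regex>` is the fix clippy's `ptr_arg` lint recommends: a slice parameter accepts a borrowed `Vec`, an array, or any other contiguous storage. A dependency-free sketch using plain substring patterns in place of the crate's compiled `Regex` values:

```rust
// Illustrative stand-in for to_exclude; the real function matches regexes.
fn to_exclude(dest: &str, exclude_patterns: &[String]) -> bool {
    exclude_patterns.iter().any(|p| dest.contains(p.as_str()))
}

fn main() {
    let patterns = vec!["mailto:".to_string(), "#wip".to_string()];
    // &Vec<String> coerces to &[String] at the call site.
    assert!(to_exclude("mailto:team@example.com", &patterns));
    assert!(!to_exclude("docs/guide.md#usage", &patterns));
}
```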
src/main.rs
@@ -22,7 +22,7 @@ fn main() {
     {
         let errors = run_checks(&content, path, &mut section_links, &config);
         for err in &errors {
-            println!("{}", err);
+            println!("{err}");
         }
         if !errors.is_empty() {
             has_errors = true;
@@ -35,5 +35,5 @@ fn main() {
         process::exit(1);
     }
 
-    println!("{}", "No broken references found.".green())
+    println!("{}", "No broken references found.".green());
 }
src/parser.rs
@@ -10,6 +10,8 @@ use crate::utils::create_options;
 pub type SectionLinkMap = HashMap<PathBuf, HashSet<String>>;
 
 /// Scan markdown file and collect section links based on its heading.
+/// # Errors
+/// This function will return an error if `path` does not already exist.
 pub fn parse_file_headings(path: &PathBuf) -> io::Result<HashSet<String>> {
     fs::read_to_string(path)
         .map(|content| crate::parser::collect_heading_links(&content))
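The added `# Errors` section is what clippy's `missing_errors_doc` lint expects on public functions returning `Result`, and the `#[must_use]` attributes added in the hunks that follow make callers who discard the return value get a warning. A toy sketch of both conventions, not the crate's actual parser:

```rust
use std::collections::HashSet;
use std::{fs, io};

/// Collect ATX-style headings from a markdown file (toy stand-in).
///
/// # Errors
/// Returns an error if the file cannot be read.
pub fn file_headings(path: &str) -> io::Result<HashSet<String>> {
    Ok(fs::read_to_string(path)?
        .lines()
        .filter(|l| l.starts_with('#'))
        .map(|l| l.trim_start_matches('#').trim().to_string())
        .collect())
}

/// Normalize a heading into an anchor (toy stand-in).
#[must_use]
pub fn to_anchor(heading: &str) -> String {
    heading.to_lowercase().replace(' ', "-")
}

fn main() {
    assert_eq!(to_anchor("Hello World"), "hello-world");
    // Dropping a #[must_use] value would trigger an `unused_must_use` warning:
    // to_anchor("Hello World");
    match file_headings("README.md") {
        Ok(h) => println!("{} headings", h.len()),
        Err(e) => eprintln!("could not read README.md: {e}"),
    }
}
```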
@@ -34,6 +36,7 @@ pub fn parse_file_headings(path: &PathBuf) -> io::Result<HashSet<String>> {
 /// assert!(anchors.contains("intro-1"));
 /// assert!(anchors.contains("hello-world"));
 /// ```
+#[must_use]
 pub fn collect_heading_links(content: &str) -> HashSet<String> {
     let mut headings = HashSet::new();
     let mut heading_counter = HashMap::new();
@@ -53,7 +56,7 @@ pub fn collect_heading_links(content: &str) -> HashSet<String> {
             Event::End(TagEnd::Heading { .. }) => {
                 let base_link = heading2link(&current_heading);
                 let link = if let Some(counter) = heading_counter.get_mut(&base_link) {
-                    let numbered_link = format!("{}-{}", base_link, counter);
+                    let numbered_link = format!("{base_link}-{counter}");
                     *counter += 1;
                     numbered_link
                 } else {
@@ -82,6 +85,7 @@ pub fn collect_heading_links(content: &str) -> HashSet<String> {
 /// assert_eq!(heading2link("This -- Is__A_Test!"), "this----is__a_test");
 /// assert_eq!(heading2link("A heading with 💡 emoji!"), "a-heading-with--emoji");
 /// ```
+#[must_use]
 pub fn heading2link(text: &str) -> String {
     text.to_lowercase()
         .chars()
src/scanner.rs
@@ -9,15 +9,17 @@ use walkdir::WalkDir;
 use crate::utils::relative_path;
 
 /// Gather markdown files from paths (file or dir)
-pub fn gather_markdown_files(
+#[must_use]
+pub fn gather_markdown_files<S: ::std::hash::BuildHasher>(
     paths: &[PathBuf],
-    exclude: &HashSet<PathBuf>,
+    exclude: &HashSet<PathBuf, S>,
 ) -> Vec<PathBuf> {
     paths
         .iter()
-        .flat_map(|path|
-            Ok(canonical)
-
+        .flat_map(|path| {
+            if let Ok(canonical) = fs::canonicalize(path) {
+                collect_markdown_from_path(&canonical, exclude)
+            } else {
                 eprintln!(
                     "{}",
                     format!("Skipping invalid path: {}", path.display()).yellow()
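Making the exclusion set generic over `S: BuildHasher`, here and in the next hunk, addresses clippy's `implicit_hasher` lint: the functions now accept a `HashSet` built with any hasher, not just the default `RandomState`. A minimal sketch with an illustrative function name:

```rust
use std::collections::HashSet;
use std::hash::BuildHasher;
use std::path::{Path, PathBuf};

// Works for HashSet<PathBuf> with the default hasher or any custom BuildHasher.
fn is_excluded<S: BuildHasher>(path: &Path, exclude: &HashSet<PathBuf, S>) -> bool {
    exclude.contains(path)
}

fn main() {
    let mut exclude: HashSet<PathBuf> = HashSet::new(); // default RandomState hasher
    exclude.insert(PathBuf::from("target/notes.md"));
    assert!(is_excluded(Path::new("target/notes.md"), &exclude));
    assert!(!is_excluded(Path::new("README.md"), &exclude));
}
```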
@@ -29,7 +31,10 @@ pub fn gather_markdown_files(
 }
 
 /// Collect markdown file(s) from a path (file or dir)
-fn collect_markdown_from_path(path: &Path, exclude: &HashSet<PathBuf>) -> Vec<PathBuf> {
+fn collect_markdown_from_path<S: ::std::hash::BuildHasher>(
+    path: &Path,
+    exclude: &HashSet<PathBuf, S>,
+) -> Vec<PathBuf> {
     if exclude.contains(path) {
         eprintln!(
             "{}",
@@ -50,7 +55,7 @@ fn collect_markdown_from_path(path: &Path, exclude: &HashSet<PathBuf>) -> Vec<PathBuf> {
             entry
                 .path()
                 .canonicalize()
-                .map_or(false, |p| !exclude.contains(&p))
+                .is_ok_and(|p| !exclude.contains(&p))
         })
         .filter_map(Result::ok)
         .filter(|entry| is_markdown_file(entry.path()))
@@ -63,5 +68,5 @@ fn collect_markdown_from_path(path: &Path, exclude: &HashSet<PathBuf>) -> Vec<PathBuf> {
 
 /// Determine if the given file path is a markdown file
 fn is_markdown_file(path: &Path) -> bool {
-    path.is_file() && path.extension().map_or(false, |ext| ext == "md")
+    path.is_file() && path.extension().is_some_and(|ext| ext == "md")
 }
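`is_some_and` and `is_ok_and` (stable since Rust 1.70) express "is present/ok and satisfies this predicate" without the older `map_or(false, ...)` detour. The extension check below mirrors the scanner's new `is_markdown_file`, simplified to skip the filesystem `is_file()` check; the parsing line is purely illustrative:

```rust
use std::path::Path;

// Simplified: the crate additionally checks path.is_file().
fn is_markdown_file(path: &Path) -> bool {
    path.extension().is_some_and(|ext| ext == "md")
}

fn main() {
    assert!(is_markdown_file(Path::new("docs/guide.md")));
    assert!(!is_markdown_file(Path::new("src/main.rs")));
    assert!(!is_markdown_file(Path::new("LICENSE")));

    // The Result counterpart used on canonicalize() results above:
    let parsed: Result<u32, _> = "42".parse();
    assert!(parsed.is_ok_and(|n| n > 0));
}
```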
src/utils.rs
@@ -6,12 +6,14 @@ use std::{
 
 use pulldown_cmark::Options;
 
+#[must_use]
 pub fn create_options() -> Options {
     Options::ENABLE_FOOTNOTES | Options::ENABLE_WIKILINKS
 }
 
-/// Create HashSet of canonicalized paths from vector of paths
-pub fn create_file_set(vec_files: &Vec<PathBuf>) -> HashSet<PathBuf> {
+/// Create ``HashSet`` of canonicalized paths from vector of paths
+#[must_use]
+pub fn create_file_set(vec_files: &[PathBuf]) -> HashSet<PathBuf> {
     vec_files
         .iter()
         .filter_map(|s| fs::canonicalize(s).ok())
@@ -19,6 +21,7 @@ pub fn create_file_set(vec_files: &Vec<PathBuf>) -> HashSet<PathBuf> {
 }
 
 /// Return a path relative to the current working directory
+#[must_use]
 pub fn relative_path(target: &Path) -> String {
     let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
 
@@ -33,6 +36,7 @@ pub fn relative_path(target: &Path) -> String {
 }
 
 /// Return a Vec where each entry is the byte offset of the start of a line
+#[must_use]
 pub fn compute_line_starts(text: &str) -> Vec<usize> {
     std::iter::once(0)
         .chain(
@@ -43,6 +47,7 @@ pub fn compute_line_starts(text: &str) -> Vec<usize> {
 }
 
 /// Convert a byte offset into (line, column) given precomputed line starts
+#[must_use]
 pub fn offset_to_line_col(offset: usize, line_starts: &[usize]) -> (usize, usize) {
     match line_starts.binary_search(&offset) {
         Ok(line) => (line + 1, 1), // exact match, first col
mdrefcheck-0.1.7/release.toml DELETED
@@ -1,5 +0,0 @@
-pre-release-replacements = [
-  {file="pyproject.toml", search='version = "[a-z0-9\\.-]+"', replace='version = "{{version}}"'},
-  {file="README.md", search='rev: v[a-z0-9\\.-]+', replace='rev: v{{version}}'}
-]
-pre-release-commit-message = "chore(release): prepare for {{version}}"