astro-accelerator 4.0.51 → 4.0.53

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "version": "4.0.51",
+  "version": "4.0.53",
   "author": "Steve Fenton",
   "name": "astro-accelerator",
   "description": "A super-lightweight, accessible, SEO-friendly starter project for Astro",
@@ -32,8 +32,8 @@
   },
   "dependencies": {
     "@astrojs/mdx": "^2.3.1",
-    "astro": "^4.15.8",
-    "astro-accelerator-utils": "^0.3.20",
+    "astro": "^4.15.9",
+    "astro-accelerator-utils": "^0.3.22",
     "cspell": "^8.14.4",
     "csv": "^6.3.10",
     "glob": "^11.0.0",
@@ -45,7 +45,7 @@
     "sharp": "^0.33.5"
   },
   "devDependencies": {
-    "@playwright/test": "^1.47.1"
+    "@playwright/test": "^1.47.2"
   },
   "files": [
     ".npmrc",
@@ -1,3 +1,5 @@
+/** @format */
+
 // warning: This file is overwritten by Astro Accelerator
 
 // Generates an ATOM feed of recent posts
@@ -5,60 +7,67 @@ import { SITE } from '@config';
 import { Accelerator, PostFiltering } from 'astro-accelerator-utils';
 
 async function getData() {
-  //@ts-ignore
-  const allArticles = import.meta.glob(['./**/*.md', './**/*.mdx']);
+  //@ts-ignore
+  const allArticles = import.meta.glob(['./**/*.md', './**/*.mdx']);
+
+  const accelerator = new Accelerator(SITE);
+  const stats = new accelerator.statistics('pages/articles/feed.xml');
+  stats.start();
+
+  let articles = [];
 
-  const accelerator = new Accelerator(SITE);
-  const stats = new accelerator.statistics('pages/articles/feed.xml');
-  stats.start();
-
-  let articles = [];
-
-  for (const path in allArticles) {
-    const article: any = await allArticles[path]();
+  for (const path in allArticles) {
+    const article: any = await allArticles[path]();
 
-    if (PostFiltering.isListable(article)) {
-      article.frontmatter.title = await accelerator.markdown.getTextFrom(article.frontmatter.title ?? '');
+    if (PostFiltering.isListable(article)) {
+      article.frontmatter.title = await accelerator.markdown.getTextFrom(
+        article.frontmatter?.title
+      );
 
-      articles.push({
-        url: article.url,
-        frontmatter: article.frontmatter
-      });
+      articles.push({
+        url: article.url,
+        frontmatter: article.frontmatter,
+      });
+    }
   }
-  }
 
-  articles = articles.sort((a, b) => b.frontmatter.pubDate.localeCompare(a.frontmatter.pubDate));
+  articles = articles.sort((a, b) =>
+    b.frontmatter.pubDate.localeCompare(a.frontmatter.pubDate)
+  );
 
-  const limit = SITE.rssLimit ?? 20;
-  const items = articles
-    .slice(0, limit)
-    .map(a => `
+  const limit = SITE.rssLimit ?? 20;
+  const items = articles.slice(0, limit).map(
+    (a) => `
   <entry>
     <title>${a.frontmatter.title ?? ''}</title>
-    <link href="${ SITE.url + a.url }" />
-    <id>${ SITE.url + accelerator.urlFormatter.formatAddress(a.url) }</id>
-    <published>${ a.frontmatter.pubDate }</published>
-    <updated>${ a.frontmatter.pubDate ?? a.frontmatter.pubDate }</updated>
-    <summary>${ a.frontmatter.description ?? '' }</summary>
-  </entry>`);
+    <link href="${SITE.url + a.url}" />
+    <id>${SITE.url + accelerator.urlFormatter.formatAddress(a.url)}</id>
+    <published>${a.frontmatter.pubDate}</published>
+    <updated>${a.frontmatter.pubDate ?? a.frontmatter.pubDate}</updated>
+    <summary>${a.frontmatter.description ?? ''}</summary>
+  </entry>`
+  );
 
-  stats.stop();
-
-  return new Response(`<?xml version="1.0" encoding="utf-8"?>
+  stats.stop();
+
+  return new Response(
+    `<?xml version="1.0" encoding="utf-8"?>
 <feed xmlns="http://www.w3.org/2005/Atom">
-  <title>${ SITE.title }</title>
-  <subtitle>${ SITE.description }</subtitle>
-  <link href="${ SITE.url }/atom.xml" rel="self" />
-  <link href="${ SITE.url }" />
-  <id>${ SITE.url }/atom.xml</id>
-  <updated>${ articles[0].frontmatter.pubDate }</updated>
+  <title>${SITE.title}</title>
+  <subtitle>${SITE.description}</subtitle>
+  <link href="${SITE.url}/atom.xml" rel="self" />
+  <link href="${SITE.url}" />
+  <id>${SITE.url}/atom.xml</id>
+  <updated>${articles[0].frontmatter.pubDate}</updated>
 ${items.join('')}
-</feed>`, {
-    status: 200,
-    headers: {
-      'Content-Type': "application/xml"
-    }
-  });
+</feed>`,
+    {
+      status: 200,
+      headers: {
+        'Content-Type': 'application/xml',
+      },
+    }
+  );
 }
 
 export const GET = getData;
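
The substantive change in the feed endpoint above (beyond the Prettier `/** @format */` pass) is that the title lookup drops the `?? ''` fallback in favour of optional chaining on `frontmatter`. A minimal sketch of the difference, using a hypothetical stand-in for `accelerator.markdown.getTextFrom` (the real utility lives in astro-accelerator-utils and may treat `undefined` differently):

```ts
// Hypothetical stand-in for getTextFrom, for illustration only.
const getTextFrom = async (value?: string) => (value ?? '').trim();

const frontmatter: { title?: string } = {}; // a post with no title set

await getTextFrom(frontmatter.title ?? ''); // old call shape: a missing title arrives as ''
await getTextFrom(frontmatter?.title); // new call shape: a missing title arrives as undefined
```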
@@ -1,3 +1,5 @@
+/** @format */
+
 // warning: This file is overwritten by Astro Accelerator
 
 import { Accelerator, PostFiltering } from 'astro-accelerator-utils';
@@ -6,7 +8,6 @@ import { SITE } from '@config';
 import { htmlToText, convert } from 'html-to-text';
 import keywordExtractor from 'keyword-extractor';
 
-
 const getData = async () => {
   //@ts-ignore
   const allPages = import.meta.glob(['./**/*.md', './**/*.mdx']);
@@ -15,22 +16,26 @@ const getData = async () => {
   const accelerator = new Accelerator(SITE);
 
   for (const path in allPages) {
-    const page = await allPages[path]() as MarkdownInstance<Record<string, any>>;
+    const page = (await allPages[path]()) as MarkdownInstance<
+      Record<string, any>
+    >;
 
     if (!PostFiltering.showInSearch(page)) {
       continue;
     }
 
     let url = page.url ?? '';
-
+
     if (page.frontmatter.paged) {
       url += '/1/';
     }
 
     const headings = await page.getHeadings();
-    const title = await accelerator.markdown.getTextFrom(page.frontmatter.title ?? '');
+    const title = await accelerator.markdown.getTextFrom(
+      page.frontmatter?.title
+    );
     const content = page.compiledContent ? page.compiledContent() : '';
-    let counted: { word: string, count: number }[] = [];
+    let counted: { word: string; count: number }[] = [];
 
     if (content) {
       const text = convert(content, { wordwrap: false });
@@ -38,33 +43,38 @@ const getData = async () => {
       const words = keywordExtractor.extract(text, {
         language: 'english',
         return_changed_case: true,
-        remove_duplicates: true
+        remove_duplicates: true,
       });
 
-      counted = words.map((w) => {
-        return { word: w, count: words.filter(wd => wd === w).length };
-      }).filter(e => e.word.replace(/[^a-z]+/g, '').length > 1);
+      counted = words
+        .map((w) => {
+          return {
+            word: w,
+            count: words.filter((wd) => wd === w).length,
+          };
+        })
+        .filter((e) => e.word.replace(/[^a-z]+/g, '').length > 1);
     }
 
     items.push({
       title: title,
-      headings: headings.map(h => {
-        return {text: h.text, slug: h.slug }
+      headings: headings.map((h) => {
+        return { text: h.text, slug: h.slug };
      }),
       description: page.frontmatter.description ?? '',
-      keywords: counted.map(c => c.word).join(' '),
+      keywords: counted.map((c) => c.word).join(' '),
       tags: page.frontmatter.tags ?? [],
       url: SITE.url + accelerator.urlFormatter.formatAddress(url),
-      date: page.frontmatter.pubDate ?? ''
+      date: page.frontmatter.pubDate ?? '',
     });
   }
 
   return new Response(JSON.stringify(items), {
     status: 200,
     headers: {
-      'Content-Type': "application/json"
-    }
+      'Content-Type': 'application/json',
+    },
   });
-}
+};
 
 export const GET = getData;
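
For reference, each object pushed into `items` by the search endpoint above serialises to a JSON entry of roughly this shape (a hand-written sketch inferred from the diff, not a type exported by the package):

```ts
interface SearchIndexEntry {
  title: string;
  headings: { text: string; slug: string }[];
  description: string;
  keywords: string; // space-separated words picked out by keyword-extractor
  tags: string[];
  url: string;
  date: string; // pubDate from frontmatter, or '' when absent
}
```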
@@ -43,7 +43,7 @@ const articles: Article[] = [];
 for (let p of posts) {
   const item = {
     url: p.url ?? '',
-    title: await accelerator.markdown.getTextFrom(p.frontmatter.title),
+    title: await accelerator.markdown.getTextFrom(p.frontmatter?.title),
     frontmatter: p.frontmatter,
     img: p.frontmatter.bannerImage
       ? getImageInfo(p.frontmatter.bannerImage.src, '', SITE.images.listerSize)
@@ -23,7 +23,7 @@ const imageAlt = frontmatter.bannerImage?.alt ?? OPEN_GRAPH.image.alt;
 const robots = frontmatter.robots ?? 'index, follow';
 const canonicalImageSrc = new URL(imageSrc, Astro.site);
 const canonicalURL = accelerator.urlFormatter.formatUrl(new URL(Astro.url.pathname, Astro.site + SITE.subfolder));
-const socialTitle = await accelerator.markdown.getTextFrom(frontmatter.title);
+const socialTitle = await accelerator.markdown.getTextFrom(frontmatter?.title);
 const title = `${ accelerator.markdown.titleCase(socialTitle) } ${ ((frontmatter.titleAdditional) ? ` ${frontmatter.titleAdditional}` : '') } | ${ SITE.title }`;
 
 stats.stop();
@@ -29,7 +29,7 @@ stats.stop();
 <ul class="recent-updates">
   {pages.map((post) => (
     <li data-destination={ accelerator.urlFormatter.formatAddress(post.url) }>
-      <a href={ accelerator.urlFormatter.formatAddress(post.url) }>{ accelerator.markdown.getTextFrom(post.frontmatter.title) }</a>
+      <a href={ accelerator.urlFormatter.formatAddress(post.url) }>{ accelerator.markdown.getTextFrom(post.frontmatter?.title) }</a>
       <time datetime={ (post.frontmatter.modDate || post.frontmatter.pubDate).toString() }>
         { accelerator.dateFormatter.formatDate((post.frontmatter.modDate || post.frontmatter.pubDate), lang) }
       </time>