securemark 0.294.7 → 0.294.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/CHANGELOG.md +8 -0
  2. package/dist/index.js +185 -205
  3. package/markdown.d.ts +13 -36
  4. package/package.json +1 -1
  5. package/src/combinator/control/constraint/block.ts +2 -2
  6. package/src/combinator/control/constraint/line.ts +7 -5
  7. package/src/combinator/control/manipulation/convert.ts +2 -1
  8. package/src/combinator/control/manipulation/fence.ts +4 -4
  9. package/src/combinator/control/manipulation/surround.ts +9 -9
  10. package/src/combinator/data/parser/some.ts +1 -1
  11. package/src/combinator/data/parser/union.ts +6 -2
  12. package/src/parser/api/bind.test.ts +0 -1
  13. package/src/parser/api/normalize.test.ts +5 -8
  14. package/src/parser/api/normalize.ts +4 -2
  15. package/src/parser/autolink.ts +1 -2
  16. package/src/parser/block/blockquote.ts +3 -3
  17. package/src/parser/block/extension/fig.ts +4 -1
  18. package/src/parser/block/heading.ts +12 -2
  19. package/src/parser/block/reply/quote.ts +1 -2
  20. package/src/parser/block/ulist.ts +1 -1
  21. package/src/parser/block.ts +0 -4
  22. package/src/parser/header.ts +28 -40
  23. package/src/parser/inline/annotation.ts +2 -3
  24. package/src/parser/inline/autolink/account.ts +49 -17
  25. package/src/parser/inline/autolink/anchor.test.ts +0 -1
  26. package/src/parser/inline/autolink/anchor.ts +16 -15
  27. package/src/parser/inline/autolink/email.test.ts +1 -1
  28. package/src/parser/inline/autolink/email.ts +10 -11
  29. package/src/parser/inline/autolink/hashnum.ts +17 -13
  30. package/src/parser/inline/autolink/hashtag.ts +19 -15
  31. package/src/parser/inline/autolink/url.ts +24 -19
  32. package/src/parser/inline/autolink.ts +1 -2
  33. package/src/parser/inline/bracket.ts +14 -14
  34. package/src/parser/inline/deletion.ts +2 -1
  35. package/src/parser/inline/emphasis.ts +2 -1
  36. package/src/parser/inline/emstrong.ts +2 -1
  37. package/src/parser/inline/extension/index.ts +4 -4
  38. package/src/parser/inline/extension/indexer.ts +1 -1
  39. package/src/parser/inline/extension/label.ts +1 -1
  40. package/src/parser/inline/extension/placeholder.ts +4 -3
  41. package/src/parser/inline/html.ts +4 -4
  42. package/src/parser/inline/htmlentity.ts +2 -2
  43. package/src/parser/inline/insertion.ts +2 -1
  44. package/src/parser/inline/italic.ts +2 -1
  45. package/src/parser/inline/link.ts +12 -24
  46. package/src/parser/inline/mark.ts +2 -1
  47. package/src/parser/inline/math.ts +4 -2
  48. package/src/parser/inline/media.ts +28 -33
  49. package/src/parser/inline/reference.ts +4 -4
  50. package/src/parser/inline/remark.ts +2 -1
  51. package/src/parser/inline/ruby.ts +3 -4
  52. package/src/parser/inline/strong.ts +2 -1
  53. package/src/parser/inline/template.ts +10 -10
  54. package/src/parser/segment.ts +2 -2
  55. package/src/parser/source/escapable.ts +3 -4
  56. package/src/parser/source/line.ts +3 -1
  57. package/src/parser/source/text.ts +5 -10
  58. package/src/parser/source/unescapable.ts +2 -4
  59. package/src/parser/source.ts +1 -2
  60. package/src/parser/inline/autolink/channel.ts +0 -44
package/markdown.d.ts CHANGED
@@ -614,7 +614,6 @@ export namespace MarkdownParser {
614
614
  Parser<HTMLSpanElement | HTMLBRElement, Context, [
615
615
  InlineParser.MathParser,
616
616
  InlineParser.AutolinkParser,
617
- SourceParser.LinebreakParser,
618
617
  SourceParser.UnescapableSourceParser,
619
618
  ]> {
620
619
  }
@@ -808,13 +807,6 @@ export namespace MarkdownParser {
808
807
  LinkParser.ParameterParser,
809
808
  ]> {
810
809
  }
811
- export interface UnsafeLinkParser extends
812
- Inline<'link/unsafelink'>,
813
- Parser<HTMLAnchorElement, Context, [
814
- LinkParser.TextParser,
815
- LinkParser.ParameterParser,
816
- ]> {
817
- }
818
810
  export interface ContentParser extends
819
811
  Inline<'link/content'>,
820
812
  Parser<List<Data<string | HTMLElement>>, Context, [
@@ -1098,7 +1090,6 @@ export namespace MarkdownParser {
1098
1090
  AutolinkParser.UrlParser.LineUrlParser,
1099
1091
  AutolinkParser.UrlParser,
1100
1092
  AutolinkParser.EmailParser,
1101
- AutolinkParser.ChannelParser,
1102
1093
  AutolinkParser.AccountParser,
1103
1094
  AutolinkParser.HashtagParser,
1104
1095
  AutolinkParser.HashnumParser,
@@ -1110,7 +1101,7 @@ export namespace MarkdownParser {
1110
1101
  // https://host
1111
1102
  Inline<'url'>,
1112
1103
  Parser<string | HTMLElement, Context, [
1113
- LinkParser.UnsafeLinkParser,
1104
+ Parser<HTMLAnchorElement, Context, []>,
1114
1105
  InlineParser,
1115
1106
  ]> {
1116
1107
  }
@@ -1120,7 +1111,7 @@ export namespace MarkdownParser {
1120
1111
  Parser<string | HTMLElement, Context, [
1121
1112
  SourceParser.StrParser,
1122
1113
  Parser<string | HTMLElement, Context, [
1123
- InlineParser.LinkParser.UnsafeLinkParser,
1114
+ Parser<HTMLAnchorElement, Context, []>,
1124
1115
  InlineParser,
1125
1116
  ]>,
1126
1117
  ]> {
@@ -1147,43 +1138,37 @@ export namespace MarkdownParser {
1147
1138
  export interface EmailParser extends
1148
1139
  // user@host
1149
1140
  Inline<'email'>,
1150
- Parser<string | HTMLAnchorElement, Context, [
1151
- Parser<HTMLAnchorElement, Context, []>,
1152
- ]> {
1153
- }
1154
- export interface ChannelParser extends
1155
- // @user#tag
1156
- Inline<'channel'>,
1157
- Parser<string | HTMLAnchorElement, Context, [
1158
- LinkParser.UnsafeLinkParser,
1141
+ Parser<HTMLAnchorElement, Context, [
1142
+ SourceParser.StrParser,
1159
1143
  ]> {
1160
1144
  }
1161
1145
  export interface AccountParser extends
1162
1146
  // @user
1147
+ // @user#tag
1163
1148
  Inline<'account'>,
1164
- Parser<string | HTMLAnchorElement, Context, [
1165
- LinkParser.UnsafeLinkParser,
1149
+ Parser<HTMLAnchorElement, Context, [
1150
+ SourceParser.StrParser,
1166
1151
  ]> {
1167
1152
  }
1168
1153
  export interface HashtagParser extends
1169
1154
  // #tag
1170
1155
  Inline<'hashtag'>,
1171
- Parser<string | HTMLAnchorElement, Context, [
1172
- LinkParser.UnsafeLinkParser,
1156
+ Parser<HTMLAnchorElement, Context, [
1157
+ SourceParser.StrParser,
1173
1158
  ]> {
1174
1159
  }
1175
1160
  export interface HashnumParser extends
1176
1161
  // #1
1177
1162
  Inline<'hashnum'>,
1178
- Parser<string | HTMLAnchorElement, Context, [
1179
- LinkParser.UnsafeLinkParser,
1163
+ Parser<HTMLAnchorElement, Context, [
1164
+ SourceParser.StrParser,
1180
1165
  ]> {
1181
1166
  }
1182
1167
  export interface AnchorParser extends
1183
1168
  // >>1
1184
1169
  Inline<'anchor'>,
1185
- Parser<string | HTMLAnchorElement, Context, [
1186
- LinkParser.UnsafeLinkParser,
1170
+ Parser<HTMLAnchorElement, Context, [
1171
+ SourceParser.StrParser,
1187
1172
  ]> {
1188
1173
  }
1189
1174
  }
@@ -1192,7 +1177,6 @@ export namespace MarkdownParser {
1192
1177
  Markdown<'autolink'>,
1193
1178
  Parser<string | HTMLElement, Context, [
1194
1179
  InlineParser.AutolinkParser,
1195
- SourceParser.LinebreakParser,
1196
1180
  SourceParser.UnescapableSourceParser,
1197
1181
  ]> {
1198
1182
  }
@@ -1210,13 +1194,6 @@ export namespace MarkdownParser {
1210
1194
  TextParser,
1211
1195
  ]> {
1212
1196
  }
1213
- export interface LinebreakParser extends
1214
- // \n
1215
- Source<'linebreak'>,
1216
- Parser<HTMLBRElement, Context, [
1217
- TextParser,
1218
- ]> {
1219
- }
1220
1197
  export interface EscapableSourceParser extends
1221
1198
  // abc
1222
1199
  Source<'escsource'>,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "securemark",
3
- "version": "0.294.7",
3
+ "version": "0.294.9",
4
4
  "description": "Secure markdown renderer working on browsers for user input data.",
5
5
  "private": false,
6
6
  "homepage": "https://github.com/falsandtru/securemark",
@@ -1,5 +1,5 @@
1
1
  import { Parser, failsafe } from '../../data/parser';
2
- import { firstline, isBlank } from './line';
2
+ import { isBlankline } from './line';
3
3
 
4
4
  export function block<P extends Parser<unknown>>(parser: P, separation?: boolean): P;
5
5
  export function block<N>(parser: Parser<N>, separation = true): Parser<N> {
@@ -10,7 +10,7 @@ export function block<N>(parser: Parser<N>, separation = true): Parser<N> {
10
10
  if (position === source.length) return;
11
11
  const result = parser(input);
12
12
  if (result === undefined) return;
13
- if (separation && !isBlank(firstline(source, context.position))) return;
13
+ if (separation && !isBlankline(source, context.position)) return;
14
14
  assert(context.position === source.length || source[context.position - 1] === '\n');
15
15
  return context.position === source.length || source[context.position - 1] === '\n'
16
16
  ? result
@@ -16,7 +16,7 @@ export function line<N>(parser: Parser<N>): Parser<N> {
16
16
  context.source = source;
17
17
  context.offset -= position;
18
18
  if (result === undefined) return;
19
- if (!isBlank(source.slice(context.position, position + line.length))) return;
19
+ if (context.position < position + line.length && !isBlankline(source, context.position)) return;
20
20
  context.position = position + line.length;
21
21
  return result;
22
22
  });
@@ -29,8 +29,10 @@ export function firstline(source: string, position: number): string {
29
29
  : source.slice(position, i + 1);
30
30
  }
31
31
 
32
- export function isBlank(line: string): boolean {
33
- return line === ''
34
- || line === '\n'
35
- || line.trimStart() === '';
32
+ const blankline = /[^\S\n]*(?:$|\n)/y;
33
+ export function isBlankline(source: string, position: number): boolean {
34
+ blankline.lastIndex = position;
35
+ return source.length === position
36
+ || source[position] === '\n'
37
+ || blankline.test(source);
36
38
  }
@@ -8,6 +8,7 @@ export function convert<N>(conv: (source: string, context: Ctx) => string, parse
8
8
  const { source, position } = context;
9
9
  if (position === source.length) return;
10
10
  const src = conv(source.slice(position), context);
11
+ assert(context.position === position);
11
12
  if (src === '') {
12
13
  if (!empty) return;
13
14
  context.position = source.length;
@@ -22,7 +23,7 @@ export function convert<N>(conv: (source: string, context: Ctx) => string, parse
22
23
  return result;
23
24
  }
24
25
  else {
25
- const { offset, backtracks } = context;
26
+ const { offset, backtracks } = context;
26
27
  const result = parser(subinput(src, context));
27
28
  context.position = context.source.length
28
29
  assert(context.offset === offset);
@@ -1,6 +1,6 @@
1
1
  import { Parser, List, Data, Ctx, failsafe } from '../../data/parser';
2
2
  import { consume } from '../../../combinator';
3
- import { firstline, isBlank } from '../constraint/line';
3
+ import { firstline, isBlankline } from '../constraint/line';
4
4
  import { push } from 'spica/array';
5
5
 
6
6
  export function fence<C extends Ctx, D extends Parser<unknown, C>[]>(opener: RegExp, limit: number, separation = true): Parser<string, C, D> {
@@ -20,20 +20,20 @@ export function fence<C extends Ctx, D extends Parser<unknown, C>[]>(opener: Reg
20
20
  context.position += matches[0].length;
21
21
  // Prevent annoying parsing in editing.
22
22
  const secondline = firstline(source, context.position);
23
- if (isBlank(secondline) && firstline(source, context.position + secondline.length).trimEnd() !== delim) return;
23
+ if (isBlankline(secondline, 0) && firstline(source, context.position + secondline.length).trimEnd() !== delim) return;
24
24
  let block = '';
25
25
  let closer = '';
26
26
  let overflow = '';
27
27
  for (let count = 1; ; ++count) {
28
28
  if (context.position === source.length) break;
29
29
  const line = firstline(source, context.position);
30
- if ((closer || count > limit + 1) && isBlank(line)) break;
30
+ if ((closer || count > limit + 1) && isBlankline(line, 0)) break;
31
31
  if(closer) {
32
32
  overflow += line;
33
33
  }
34
34
  if (!closer && count <= limit + 1 && line.slice(0, delim.length) === delim && line.trimEnd() === delim) {
35
35
  closer = line;
36
- if (isBlank(firstline(source, context.position + line.length))) {
36
+ if (isBlankline(source, context.position + line.length)) {
37
37
  context.position += line.length;
38
38
  break;
39
39
  }
@@ -4,51 +4,51 @@ import { matcher, clear } from '../../../combinator';
4
4
  export function surround<P extends Parser<unknown>, S = string>(
5
5
  opener: string | RegExp | Parser<S, Context<P>>, parser: IntermediateParser<P>, closer: string | RegExp | Parser<S, Context<P>>,
6
6
  optional?: false,
7
+ backtracks?: readonly number[],
7
8
  f?: (rss: [List<Data<S>>, List<Data<SubNode<P>>>, List<Data<S>>], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
8
9
  g?: (rss: [List<Data<S>>, List<Data<SubNode<P>>> | undefined], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
9
- backtracks?: readonly number[],
10
10
  ): P;
11
11
  export function surround<P extends Parser<unknown>, S = string>(
12
12
  opener: string | RegExp | Parser<S, Context<P>>, parser: IntermediateParser<P>, closer: string | RegExp | Parser<S, Context<P>>,
13
13
  optional?: boolean,
14
+ backtracks?: readonly number[],
14
15
  f?: (rss: [List<Data<S>>, List<Data<SubNode<P>>> | undefined, List<Data<S>>], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
15
16
  g?: (rss: [List<Data<S>>, List<Data<SubNode<P>>> | undefined], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
16
- backtracks?: readonly number[],
17
17
  ): P;
18
18
  export function surround<P extends Parser<unknown>, S = string>(
19
19
  opener: string | RegExp | Parser<S, Context<P>>, parser: P, closer: string | RegExp | Parser<S, Context<P>>,
20
20
  optional?: false,
21
+ backtracks?: readonly number[],
21
22
  f?: (rss: [List<Data<S>>, List<Data<Node<P>>>, List<Data<S>>], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
22
23
  g?: (rss: [List<Data<S>>, List<Data<Node<P>>> | undefined], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
23
- backtracks?: readonly number[],
24
24
  ): P;
25
25
  export function surround<P extends Parser<unknown>, S = string>(
26
26
  opener: string | RegExp | Parser<S, Context<P>>, parser: P, closer: string | RegExp | Parser<S, Context<P>>,
27
27
  optional?: boolean,
28
+ backtracks?: readonly number[],
28
29
  f?: (rss: [List<Data<S>>, List<Data<Node<P>>> | undefined, List<Data<S>>], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
29
30
  g?: (rss: [List<Data<S>>, List<Data<Node<P>>> | undefined], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
30
- backtracks?: readonly number[],
31
31
  ): P;
32
32
  export function surround<P extends Parser<string>, S = string>(
33
33
  opener: string | RegExp | Parser<S, Context<P>>, parser: string | RegExp | P, closer: string | RegExp | Parser<S, Context<P>>,
34
34
  optional?: false,
35
+ backtracks?: readonly number[],
35
36
  f?: (rss: [List<Data<S>>, List<Data<Node<P>>>, List<Data<S>>], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
36
37
  g?: (rss: [List<Data<S>>, List<Data<Node<P>>> | undefined], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
37
- backtracks?: readonly number[],
38
38
  ): P;
39
39
  export function surround<P extends Parser<string>, S = string>(
40
40
  opener: string | RegExp | Parser<S, Context<P>>, parser: string | RegExp | P, closer: string | RegExp | Parser<S, Context<P>>,
41
41
  optional?: boolean,
42
+ backtracks?: readonly number[],
42
43
  f?: (rss: [List<Data<S>>, List<Data<Node<P>>> | undefined, List<Data<S>>], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
43
44
  g?: (rss: [List<Data<S>>, List<Data<Node<P>>> | undefined], context: Context<P>) => Result<Node<P>, Context<P>, SubParsers<P>>,
44
- backtracks?: readonly number[],
45
45
  ): P;
46
46
  export function surround<N>(
47
47
  opener: string | RegExp | Parser<N>, parser: string | RegExp | Parser<N>, closer: string | RegExp | Parser<N>,
48
48
  optional: boolean = false,
49
+ backtracks: readonly number[] = [],
49
50
  f?: (rss: [List<Data<N>>, List<Data<N>>, List<Data<N>>], context: Ctx) => Result<N>,
50
51
  g?: (rss: [List<Data<N>>, List<Data<N>> | undefined], context: Ctx) => Result<N>,
51
- backtracks: readonly number[] = [],
52
52
  ): Parser<N> {
53
53
  switch (typeof opener) {
54
54
  case 'string':
@@ -129,7 +129,7 @@ export function open<N>(
129
129
  optional?: boolean,
130
130
  backtracks?: readonly number[],
131
131
  ): Parser<N> {
132
- return surround(opener, parser as Parser<N>, '', optional, undefined, undefined, backtracks);
132
+ return surround(opener, parser as Parser<N>, '', optional, backtracks);
133
133
  }
134
134
  export function close<P extends Parser<unknown>>(
135
135
  parser: P,
@@ -149,7 +149,7 @@ export function close<N>(
149
149
  optional?: boolean,
150
150
  backtracks?: readonly number[],
151
151
  ): Parser<N> {
152
- return surround('', parser as Parser<N>, closer, optional, undefined, undefined, backtracks);
152
+ return surround('', parser as Parser<N>, closer, optional, backtracks);
153
153
  }
154
154
 
155
155
  const statesize = 2;
@@ -24,7 +24,7 @@ export function some<N>(parser: Parser<N>, end?: string | RegExp | number, delim
24
24
  context.delimiters.push(delims);
25
25
  }
26
26
  // whileは数倍遅い
27
- for (; context.position < source.length;) {
27
+ for (const len = source.length; context.position < len;) {
28
28
  if (match(input)) break;
29
29
  if (context.delimiters?.match(input)) break;
30
30
  const result = parser(input);
@@ -10,8 +10,12 @@ export function union<N, D extends Parser<N>[]>(parsers: D): Parser<N, Ctx, D> {
10
10
  return parsers[0];
11
11
  default:
12
12
  return eval([
13
+ '((',
14
+ parsers.map((_, i) => `parser${i},`).join(''),
15
+ ') =>',
13
16
  'input =>',
14
- parsers.map((_, i) => `|| parsers[${i}](input)`).join('').slice(2),
15
- ].join(''));
17
+ parsers.map((_, i) => `|| parser${i}(input)`).join('').slice(2),
18
+ ')',
19
+ ].join(''))(...parsers);
16
20
  }
17
21
  }
@@ -48,7 +48,6 @@ describe('Unit: parser/api/bind', () => {
48
48
  [
49
49
  '<h1 class="error">Error: Too large segment over 100,000 bytes.</h1>',
50
50
  `<pre class="error" translate="no">${'\n'.repeat(997)}...</pre>`,
51
- '<h1 class="error">Error: Too large segment over 100,000 bytes.</h1>',
52
51
  ]);
53
52
  });
54
53
 
@@ -2,10 +2,6 @@ import { normalize, escape } from './normalize';
2
2
 
3
3
  describe('Unit: parser/normalize', () => {
4
4
  describe('normalize', () => {
5
- it('invalid surrogate pairs', () => {
6
- assert(normalize('\uDC00\uD800') === '\uFFFD\uFFFD');
7
- });
8
-
9
5
  it('controls', () => {
10
6
  assert(normalize('\r') === '\n');
11
7
  assert(normalize('\r\n') === '\n');
@@ -50,10 +46,11 @@ describe('Unit: parser/normalize', () => {
50
46
  assert(normalize('\u202A') === '\uFFFD');
51
47
  assert(normalize('\u202F') === '\uFFFD');
52
48
  assert(normalize('\uFEFF') === '\uFFFD');
53
- assert(normalize('\u180E') === '\uFFFD');
54
- assert(normalize('\u1820\u180E') === '\u1820\u180E');
55
- assert(normalize('\u1821\u180E') === '\u1821\u180E');
56
- assert(normalize('\u1822\u180E') === '\u1822\uFFFD');
49
+ //assert(normalize('\u180E') === '\uFFFD');
50
+ //assert(normalize('\u1820\u180E') === '\u1820\u180E');
51
+ //assert(normalize('\u1821\u180E') === '\u1821\u180E');
52
+ //assert(normalize('\u1822\u180E') === '\u1822\uFFFD');
53
+ //assert(normalize('\uDC00\uD800') === '\uFFFD\uFFFD');
57
54
  });
58
55
 
59
56
  it('header', () => {
@@ -14,8 +14,10 @@ function format(source: string): string {
14
14
 
15
15
  const invalid = new RegExp([
16
16
  /(?![\t\r\n])[\x00-\x1F\x7F]/g.source,
17
- /(?!\u200D)[\u2006\u200B-\u200F\u202A-\u202F\u2060\uFEFF]|(?<![\u1820\u1821])\u180E/g.source,
18
- /[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]/g.source,
17
+ /(?!\u200D)[\u2006\u200B-\u200F\u202A-\u202F\u2060\uFEFF]/g.source,
18
+ // 後読みが重い
19
+ ///(?<![\u1820\u1821])\u180E/g.source,
20
+ ///[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]/g.source,
19
21
  ].join('|'), 'g');
20
22
  function sanitize(source: string): string {
21
23
  return source.replace(invalid, UNICODE_REPLACEMENT_CHARACTER);
@@ -1,13 +1,12 @@
1
1
  import { MarkdownParser } from '../../markdown';
2
2
  import { union, some, lazy } from '../combinator';
3
3
  import { autolink as autolink_ } from './inline/autolink';
4
- import { linebreak, unescsource } from './source';
4
+ import { unescsource } from './source';
5
5
 
6
6
  export import AutolinkParser = MarkdownParser.AutolinkParser;
7
7
 
8
8
  export const autolink: AutolinkParser = lazy(() =>
9
9
  some(union([
10
10
  autolink_,
11
- linebreak,
12
11
  unescsource,
13
12
  ])));
@@ -1,7 +1,7 @@
1
1
  import { BlockquoteParser } from '../block';
2
2
  import { Recursion } from '../context';
3
3
  import { List, Data } from '../../combinator/data/parser';
4
- import { union, some, creation, recursion, block, validate, rewrite, open, convert, lazy, fmap } from '../../combinator';
4
+ import { union, some, consume, recursion, block, validate, rewrite, open, convert, lazy, fmap } from '../../combinator';
5
5
  import { autolink } from '../autolink';
6
6
  import { contentline } from '../source';
7
7
  import { unwrap } from '../util';
@@ -37,10 +37,10 @@ const markdown: BlockquoteParser.MarkdownParser = lazy(() => fmap(
37
37
  rewrite(
38
38
  indent,
39
39
  convert(unindent, markdown, false, true)),
40
- creation(10,
41
40
  rewrite(
42
41
  some(contentline, opener),
43
42
  convert(unindent, ({ context }) => {
43
+ consume(10, context);
44
44
  const { source } = context;
45
45
  const references = html('ol', { class: 'references' });
46
46
  const document = parse(source, {
@@ -51,6 +51,6 @@ const markdown: BlockquoteParser.MarkdownParser = lazy(() => fmap(
51
51
  }, context);
52
52
  context.position = source.length;
53
53
  return new List([new Data(html('section', [document, html('h2', 'References'), references]))]);
54
- }, false, true))),
54
+ }, false, true)),
55
55
  ]))),
56
56
  ns => new List([new Data(html('blockquote', unwrap(ns)))])));
@@ -30,7 +30,10 @@ export const fig: FigParser = block(rewrite(segment, verify(convert(
30
30
  // Bug: TypeScript
31
31
  const fence = (/^[^\n]*\n!?>+ /.test(source) && source.match(/^~{3,}(?=[^\S\n]*$)/mg) as string[] || [])
32
32
  .reduce((max, fence) => fence > max ? fence : max, '~~') + '~';
33
- return parser({ context })
33
+ const { position } = context;
34
+ const result = parser({ context });
35
+ context.position = position;
36
+ return result
34
37
  ? `${fence}figure ${source.replace(/^(.+\n.+\n)([\S\s]+?)\n?$/, '$1\n$2')}\n${fence}`
35
38
  : `${fence}figure ${source}\n\n${fence}`;
36
39
  },
@@ -1,7 +1,7 @@
1
1
  import { HeadingParser } from '../block';
2
2
  import { State } from '../context';
3
3
  import { List, Data } from '../../combinator/data/parser';
4
- import { union, some, state, block, line, focus, rewrite, open, fmap } from '../../combinator';
4
+ import { union, some, state, block, line, focus, rewrite, open, fmap, firstline } from '../../combinator';
5
5
  import { inline, indexee, indexer, dataindex } from '../inline';
6
6
  import { str } from '../source';
7
7
  import { visualize, trimBlank } from '../visibility';
@@ -10,7 +10,17 @@ import { html, defrag } from 'typed-dom/dom';
10
10
 
11
11
  export const segment: HeadingParser.SegmentParser = block(focus(
12
12
  /#+ +\S[^\n]*(?:\n#+(?=$|[ \n])[^\n]*)*(?:$|\n)/y,
13
- some(line(({ context: { source } }) => new List([new Data(source)])))));
13
+ input => {
14
+ const { context } = input;
15
+ const { source } = context;
16
+ const acc = new List<Data<string>>();
17
+ for (; context.position < source.length;) {
18
+ const line = firstline(source, context.position);
19
+ acc.push(new Data(line));
20
+ context.position += line.length;
21
+ }
22
+ return acc;
23
+ }));
14
24
 
15
25
  export const heading: HeadingParser = block(rewrite(segment,
16
26
  // その他の表示制御は各所のCSSで行う。
@@ -3,7 +3,7 @@ import { List, Data } from '../../../combinator/data/parser';
3
3
  import { union, some, block, validate, rewrite, convert, lazy, fmap } from '../../../combinator';
4
4
  import { math } from '../../inline/math';
5
5
  import { autolink } from '../../inline/autolink';
6
- import { linebreak, unescsource, anyline } from '../../source';
6
+ import { unescsource, anyline } from '../../source';
7
7
  import { unwrap } from '../../util';
8
8
  import { html, defrag } from 'typed-dom/dom';
9
9
 
@@ -19,7 +19,6 @@ export const quote: ReplyParser.QuoteParser = lazy(() => block(fmap(
19
19
  // quote補助関数が残した数式をパースする。
20
20
  math,
21
21
  autolink,
22
- linebreak,
23
22
  unescsource,
24
23
  ])),
25
24
  false)),
@@ -10,7 +10,7 @@ import { unwrap } from '../util';
10
10
  import { html, defrag } from 'typed-dom/dom';
11
11
 
12
12
  export const ulist: UListParser = lazy(() => block(validate(
13
- /- /y,
13
+ '- ',
14
14
  ulist_)));
15
15
 
16
16
  export const ulist_: UListParser = lazy(() => block(fmap(validate(
@@ -62,10 +62,6 @@ export const block: BlockParser = reset(
62
62
  if (position === source.length) return;
63
63
  const fst = source[position];
64
64
  switch (fst) {
65
- case '\n':
66
- assert(source.trim() === '');
67
- input.context.position = source.length;
68
- return new List();
69
65
  case '=':
70
66
  if (source.startsWith('===', position)) return pagebreak(input);
71
67
  break;
@@ -1,7 +1,6 @@
1
1
  import { MarkdownParser } from '../../markdown';
2
2
  import { List, Data } from '../combinator/data/parser';
3
- import { union, inits, some, block, line, validate, focus, rewrite, clear, convert, lazy, fmap } from '../combinator';
4
- import { segment } from './segment';
3
+ import { union, inits, some, block, line, validate, focus, clear, convert, lazy, fmap } from '../combinator';
5
4
  import { str } from './source';
6
5
  import { unwrap, invalid } from './util';
7
6
  import { normalize } from './api/normalize';
@@ -10,44 +9,33 @@ import { html, defrag } from 'typed-dom/dom';
10
9
  export const header: MarkdownParser.HeaderParser = lazy(() => validate(
11
10
  /---+ *\r?\n(?=\S)/y,
12
11
  inits([
13
- rewrite(
14
- ({ context }) => {
15
- const { source } = context;
16
- if (context.header ?? true) {
17
- context.position += segment(source).next().value!.length;
18
- }
19
- else {
20
- context.position = source.length;
21
- }
22
- return new List();
23
- },
24
- block(
25
- union([
26
- validate(({ context }) => context.header ?? true,
27
- focus(/(---+) *\r?\n(?:[A-Za-z][0-9A-Za-z]*(?:-[0-9A-Za-z]+)*:[ \t]+\S[^\r\n]*\r?\n){1,100}\1 *(?:$|\r?\n)/y,
28
- convert(source =>
29
- normalize(source.slice(source.indexOf('\n') + 1, source.trimEnd().lastIndexOf('\n'))).replace(/(\S)\s+$/mg, '$1'),
30
- fmap(
31
- some(union([field])),
32
- ns => new List([
33
- new Data(html('aside', { class: 'header' }, [
34
- html('details',
35
- { open: '' },
36
- defrag(unwrap(ns.unshift(new Data(html('summary', 'Header'))) && ns))),
37
- ])),
38
- ])), false))),
39
- ({ context }) => {
40
- const { source, position } = context;
41
- context.position += source.length;
42
- return new List([
43
- new Data(html('pre', {
44
- class: 'invalid',
45
- translate: 'no',
46
- ...invalid('header', 'syntax', 'Invalid syntax'),
47
- }, normalize(source.slice(position)))),
48
- ]);
49
- },
50
- ]))),
12
+ block(
13
+ union([
14
+ validate(({ context }) => context.header ?? true,
15
+ focus(/(---+) *\r?\n(?:[A-Za-z][0-9A-Za-z]*(?:-[0-9A-Za-z]+)*:[ \t]+\S[^\r\n]*\r?\n){1,100}\1 *(?:$|\r?\n)/y,
16
+ convert(source =>
17
+ normalize(source.slice(source.indexOf('\n') + 1, source.trimEnd().lastIndexOf('\n'))).replace(/(\S)\s+$/mg, '$1'),
18
+ fmap(
19
+ some(union([field])),
20
+ ns => new List([
21
+ new Data(html('aside', { class: 'header' }, [
22
+ html('details',
23
+ { open: '' },
24
+ defrag(unwrap(ns.unshift(new Data(html('summary', 'Header'))) && ns))),
25
+ ])),
26
+ ])), false))),
27
+ ({ context }) => {
28
+ const { source, position } = context;
29
+ context.position += source.length;
30
+ return new List([
31
+ new Data(html('pre', {
32
+ class: 'invalid',
33
+ translate: 'no',
34
+ ...invalid('header', 'syntax', 'Invalid syntax'),
35
+ }, normalize(source.slice(position)))),
36
+ ]);
37
+ },
38
+ ])),
51
39
  clear(str(/ *\r?\n/y)),
52
40
  ])));
53
41
 
@@ -13,9 +13,8 @@ export const annotation: AnnotationParser = lazy(() => constraint(State.annotati
13
13
  trimBlankStart(some(union([inline]), ')', [[')', 1]])))),
14
14
  '))',
15
15
  false,
16
+ [1 | Backtrack.bracket, 3 | Backtrack.doublebracket],
16
17
  ([, ns], context) =>
17
18
  context.linebreak === 0
18
19
  ? new List([new Data(html('sup', { class: 'annotation' }, [html('span', defrag(unwrap(trimBlankNodeEnd(ns))))]))])
19
- : undefined,
20
- undefined,
21
- [1 | Backtrack.bracket, 3 | Backtrack.doublebracket])));
20
+ : undefined)));