reffy 9.1.2 → 10.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. package/LICENSE +21 -21
  2. package/README.md +151 -151
  3. package/index.js +9 -9
  4. package/package.json +53 -53
  5. package/reffy.js +294 -294
  6. package/src/browserlib/create-outline.mjs +352 -352
  7. package/src/browserlib/extract-cssdfn.mjs +405 -405
  8. package/src/browserlib/extract-dfns.mjs +687 -687
  9. package/src/browserlib/extract-editors.mjs~ +14 -0
  10. package/src/browserlib/extract-elements.mjs +205 -205
  11. package/src/browserlib/extract-events.mjs +360 -360
  12. package/src/browserlib/extract-events.mjs~ +3 -0
  13. package/src/browserlib/extract-headings.mjs +48 -48
  14. package/src/browserlib/extract-ids.mjs +28 -28
  15. package/src/browserlib/extract-links.mjs +29 -28
  16. package/src/browserlib/extract-references.mjs +203 -203
  17. package/src/browserlib/extract-webidl.mjs +134 -134
  18. package/src/browserlib/generate-es-dfn-report.sh~ +4 -0
  19. package/src/browserlib/get-absolute-url.mjs +21 -21
  20. package/src/browserlib/get-generator.mjs +26 -26
  21. package/src/browserlib/get-lastmodified-date.mjs +13 -13
  22. package/src/browserlib/get-title.mjs +11 -11
  23. package/src/browserlib/informative-selector.mjs +16 -16
  24. package/src/browserlib/map-ids-to-headings.mjs +136 -136
  25. package/src/browserlib/reffy.json +57 -57
  26. package/src/cli/check-missing-dfns.js +624 -624
  27. package/src/cli/csstree-grammar-check.js +28 -0
  28. package/src/cli/csstree-grammar-check.js~ +10 -0
  29. package/src/cli/csstree-grammar-parser.js +11 -0
  30. package/src/cli/csstree-grammar-parser.js~ +1 -0
  31. package/src/cli/extract-editors.js~ +38 -0
  32. package/src/cli/merge-crawl-results.js +128 -128
  33. package/src/cli/parse-webidl.js +436 -436
  34. package/src/cli/process-specs.js~ +28 -0
  35. package/src/lib/css-grammar-parse-tree.schema.json +109 -109
  36. package/src/lib/css-grammar-parser.js +440 -440
  37. package/src/lib/fetch.js +56 -56
  38. package/src/lib/nock-server.js +127 -127
  39. package/src/lib/post-processor.js +270 -269
  40. package/src/lib/specs-crawler.js +597 -597
  41. package/src/lib/util.js +1022 -1022
  42. package/src/postprocessing/annotate-links.js +43 -0
  43. package/src/postprocessing/annotate-links.js~ +8 -0
  44. package/src/postprocessing/csscomplete.js +50 -50
  45. package/src/postprocessing/events.js +215 -215
  46. package/src/postprocessing/idlnames.js +389 -389
  47. package/src/postprocessing/idlparsed.js +31 -31
  48. package/src/specs/missing-css-rules.json +197 -197
  49. package/src/specs/spec-equivalents.json +149 -149
  50. package/src/browserlib/canonicalize-url.mjs +0 -51
package/LICENSE CHANGED
@@ -1,21 +1,21 @@
1
- The MIT License (MIT)
2
-
3
- Copyright (c) 2020 World Wide Web Consortium
4
-
5
- Permission is hereby granted, free of charge, to any person obtaining a copy
6
- of this software and associated documentation files (the "Software"), to deal
7
- in the Software without restriction, including without limitation the rights
8
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
- copies of the Software, and to permit persons to whom the Software is
10
- furnished to do so, subject to the following conditions:
11
-
12
- The above copyright notice and this permission notice shall be included in all
13
- copies or substantial portions of the Software.
14
-
15
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
- SOFTWARE.
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) 2020 World Wide Web Consortium
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md CHANGED
@@ -1,151 +1,151 @@
1
- # Reffy
2
-
3
- <img align="right" width="256" height="256" src="images/reffy-512.png" alt="Reffy, represented as a brave little worm with a construction helmet, ready to crawl specs">
4
-
5
- Reffy is a **Web spec crawler** tool. It is notably used to update [Webref](https://github.com/w3c/webref#webref) every 6 hours.
6
-
7
- The code features a generic crawler that can fetch Web specifications and generate machine-readable extracts out of them. Created extracts include lists of CSS properties, definitions, IDL, links and references contained in the specification.
8
-
9
- ## How to use
10
-
11
- ### Pre-requisites
12
-
13
- To install Reffy, you need [Node.js](https://nodejs.org/en/) 14 or greater.
14
-
15
- ### Installation
16
-
17
- Reffy is available as an NPM package. To install the package globally, run:
18
-
19
- ```bash
20
- npm install -g reffy
21
- ```
22
-
23
- This will install Reffy as a command-line interface tool.
24
-
25
- The list of specs crawled by default evolves regularly. To make sure that you run the latest version, use:
26
-
27
- ```bash
28
- npm update -g reffy
29
- ```
30
-
31
- ### Launch Reffy
32
-
33
- Reffy crawls requested specifications and runs a set of processing modules on the content fetched to create relevant extracts from each spec. Which specs get crawled, and which processing modules get run depend on how the crawler gets called. By default, the crawler crawls all specs defined in [browser-specs](https://github.com/w3c/browser-specs/) and runs all core processing modules defined in the [`browserlib`](https://github.com/w3c/reffy/tree/main/src/browserlib) folder.
34
-
35
- Reffy can also run post-processing modules on the results of the crawl to create additional views of the data extracted from the spec during the crawl.
36
-
37
- Crawl results will either be returned to the console or saved in individual files in a report folder when the `--output` parameter is set.
38
-
39
- Examples of information that can be extracted from the specs:
40
-
41
- 1. Generic information such as the title of the spec or the URL of the Editor's Draft. This information is typically copied over from [browser-specs](https://github.com/w3c/browser-specs/).
42
- 2. The list of terms that the spec defines, in a format suitable for ingestion in cross-referencing tools such as [ReSpec](https://respec.org/xref/).
43
- 3. The list of IDs, the list of headings and the list of links in the spec.
44
- 4. The list of normative/informative references found in the spec.
45
- 5. Extended information about WebIDL term definitions and references that the spec contains
46
- 6. For CSS specs, the list of CSS properties, descriptors and value spaces that the spec defines.
47
-
48
- The crawler can be fully parameterized to crawl a specific list of specs and run a custom set of processing modules on them. For example:
49
-
50
- - To extract the raw IDL defined in Fetch, run:
51
- ```bash
52
- reffy --spec fetch --module idl
53
- ```
54
- - To retrieve the list of specs that the HTML spec references, run (noting that crawling the HTML spec takes some time due to it being a multipage spec):
55
- ```bash
56
- reffy --spec html --module refs
57
- ```
58
- - To extract the list of CSS properties defined in CSS Flexible Box Layout Module Level 1, run:
59
- ```bash
60
- reffy --spec css-flexbox-1 --module css
61
- ```
62
- - To extract the list of terms defined in WAI ARIA 1.2, run:
63
- ```bash
64
- reffy --spec wai-aria-1.2 --module dfns
65
- ```
66
- - To run a hypothetical `extract-editors.mjs` processing module and create individual spec extracts with the result of the processing under an `editors` folder for all specs in browser-specs, run:
67
- ```bash
68
- reffy --output reports/test --module editors:extract-editors.mjs
69
- ```
70
-
71
- You may add `--terse` (or `-t`) to the above commands to access the extracts directly.
72
-
73
- Run `reffy -h` for a complete list of options and usage details.
74
-
75
-
76
- Some notes:
77
-
78
- * The crawler may take a few minutes, depending on the number of specs it needs to crawl.
79
- * The crawler uses a local cache for HTTP exchanges. It will create and fill a `.cache` subfolder in particular.
80
- * If you cloned the repo instead of installing Reffy globally, replace `reffy` with `node reffy.js` in the above example to run Reffy.
81
-
82
-
83
- ## Additional tools
84
-
85
- Additional CLI tools in the `src/cli` folder complete the main specs crawler.
86
-
87
-
88
- ### WebIDL parser
89
-
90
- The **WebIDL parser** takes the relative path to an IDL extract and generates a JSON structure that describes WebIDL term definitions and references that the spec contains. The parser uses [WebIDL2](https://github.com/darobin/webidl2.js/) to parse the WebIDL content found in the spec. To run the WebIDL parser: `node src/cli/parse-webidl.js [idlfile]`
91
-
92
- To create the WebIDL extract in the first place, you will need to run the `idl` module in Reffy, as in:
93
-
94
- ```bash
95
- reffy --spec fetch --module idl > fetch.idl
96
- ```
97
-
98
-
99
- ### Crawl results merger
100
-
101
- The **crawl results merger** merges a new JSON crawl report into a reference one. This tool is typically useful to replace the crawl results of a given specification with the results of a new run of the crawler on that specification. To run the crawl results merger: `node src/cli/merge-crawl-results.js [new crawl report] [reference crawl report] [crawl report to create]`
102
-
103
-
104
- ### Analysis tools
105
-
106
- Starting with Reffy v5, analysis tools that used to be part of Reffy's suite of tools to study extracts and create human-readable reports of potential spec anomalies migrated to a companion tool named [Strudy](https://github.com/w3c/strudy). The actual reports get published in a separate [w3c/webref-analysis](https://github.com/w3c/webref-analysis) repository as well.
107
-
108
-
109
- ### WebIDL terms explorer
110
-
111
- See the related **[WebIDLPedia](https://dontcallmedom.github.io/webidlpedia)** project and its [repo](https://github.com/dontcallmedom/webidlpedia).
112
-
113
-
114
- ## Technical notes
115
-
116
- Reffy should be able to parse most of the W3C/WHATWG specifications that define CSS and/or WebIDL terms (both published versions and Editor's Drafts), and more generally speaking specs authored with one of [Bikeshed](https://tabatkins.github.io/bikeshed/) or [ReSpec](https://respec.org/docs/). Reffy can also parse certain IETF specs to some extent, and may work with other types of specs as well.
117
-
118
- ### List of specs to crawl
119
-
120
- Reffy crawls specs defined in [w3c/browser-specs](https://github.com/w3c/browser-specs/). If you believe a spec is missing, please check the [Spec selection criteria](https://github.com/w3c/browser-specs/#spec-selection-criteria) and create an issue (or prepare a pull request) against the [w3c/browser-specs](https://github.com/w3c/browser-specs/) repository.
121
-
122
- ### Crawling a spec
123
-
124
- Given some spec info, the crawler basically goes through the following steps:
125
-
126
- 1. Load the URL through Puppeteer.
127
- 2. If the document contains a "head" section that includes a link whose label looks like "single page", go back to step 2 and load the target of that link instead. This makes the crawler load the single page version of multi-page specifications such as HTML5.
128
- 3. If the document is a multi-page spec without a "single page" version, load the individual subpages and add their content to the bottom of the first page to create a single page version.
129
- 4. If the document uses ReSpec, let ReSpec finish its generation work.
130
- 5. Run internal tools on the generated document to build the relevant information.
131
-
132
- The crawler processes 4 specifications at a time. Network and parsing errors should be reported in the crawl results.
133
-
134
- ### Config parameters
135
-
136
- The crawler reads parameters from the `config.json` file. Optional parameters:
137
-
138
- * `cacheRefresh`: set this flag to `never` to tell the crawler to use the cache entry for a URL directly, instead of sending a conditional HTTP request to check whether the entry is still valid. This parameter is typically useful when developing Reffy's code to work offline.
139
- * `resetCache`: set this flag to `true` to tell the crawler to reset the contents of the local cache when it starts.
140
-
141
-
142
- ## Contributing
143
-
144
- Authors so far are [François Daoust](https://github.com/tidoust/) and [Dominique Hazaël-Massieux](https://github.com/dontcallmedom/).
145
-
146
- Additional ideas, bugs and/or code contributions are most welcome. Create [issues on GitHub](https://github.com/w3c/reffy/issues) as needed!
147
-
148
-
149
- ## Licensing
150
-
151
- The code is available under an [MIT license](LICENSE).
1
+ # Reffy
2
+
3
+ <img align="right" width="256" height="256" src="images/reffy-512.png" alt="Reffy, represented as a brave little worm with a construction helmet, ready to crawl specs">
4
+
5
+ Reffy is a **Web spec crawler** tool. It is notably used to update [Webref](https://github.com/w3c/webref#webref) every 6 hours.
6
+
7
+ The code features a generic crawler that can fetch Web specifications and generate machine-readable extracts out of them. Created extracts include lists of CSS properties, definitions, IDL, links and references contained in the specification.
8
+
9
+ ## How to use
10
+
11
+ ### Pre-requisites
12
+
13
+ To install Reffy, you need [Node.js](https://nodejs.org/en/) 14 or greater.
14
+
15
+ ### Installation
16
+
17
+ Reffy is available as an NPM package. To install the package globally, run:
18
+
19
+ ```bash
20
+ npm install -g reffy
21
+ ```
22
+
23
+ This will install Reffy as a command-line interface tool.
24
+
25
+ The list of specs crawled by default evolves regularly. To make sure that you run the latest version, use:
26
+
27
+ ```bash
28
+ npm update -g reffy
29
+ ```
30
+
31
+ ### Launch Reffy
32
+
33
+ Reffy crawls requested specifications and runs a set of processing modules on the content fetched to create relevant extracts from each spec. Which specs get crawled, and which processing modules get run depend on how the crawler gets called. By default, the crawler crawls all specs defined in [browser-specs](https://github.com/w3c/browser-specs/) and runs all core processing modules defined in the [`browserlib`](https://github.com/w3c/reffy/tree/main/src/browserlib) folder.
34
+
35
+ Reffy can also run post-processing modules on the results of the crawl to create additional views of the data extracted from the spec during the crawl.
36
+
37
+ Crawl results will either be returned to the console or saved in individual files in a report folder when the `--output` parameter is set.
38
+
39
+ Examples of information that can be extracted from the specs:
40
+
41
+ 1. Generic information such as the title of the spec or the URL of the Editor's Draft. This information is typically copied over from [browser-specs](https://github.com/w3c/browser-specs/).
42
+ 2. The list of terms that the spec defines, in a format suitable for ingestion in cross-referencing tools such as [ReSpec](https://respec.org/xref/).
43
+ 3. The list of IDs, the list of headings and the list of links in the spec.
44
+ 4. The list of normative/informative references found in the spec.
45
+ 5. Extended information about WebIDL term definitions and references that the spec contains
46
+ 6. For CSS specs, the list of CSS properties, descriptors and value spaces that the spec defines.
47
+
48
+ The crawler can be fully parameterized to crawl a specific list of specs and run a custom set of processing modules on them. For example:
49
+
50
+ - To extract the raw IDL defined in Fetch, run:
51
+ ```bash
52
+ reffy --spec fetch --module idl
53
+ ```
54
+ - To retrieve the list of specs that the HTML spec references, run (noting that crawling the HTML spec takes some time due to it being a multipage spec):
55
+ ```bash
56
+ reffy --spec html --module refs
57
+ ```
58
+ - To extract the list of CSS properties defined in CSS Flexible Box Layout Module Level 1, run:
59
+ ```bash
60
+ reffy --spec css-flexbox-1 --module css
61
+ ```
62
+ - To extract the list of terms defined in WAI ARIA 1.2, run:
63
+ ```bash
64
+ reffy --spec wai-aria-1.2 --module dfns
65
+ ```
66
+ - To run a hypothetical `extract-editors.mjs` processing module and create individual spec extracts with the result of the processing under an `editors` folder for all specs in browser-specs, run:
67
+ ```bash
68
+ reffy --output reports/test --module editors:extract-editors.mjs
69
+ ```
70
+
71
+ You may add `--terse` (or `-t`) to the above commands to access the extracts directly.
72
+
73
+ Run `reffy -h` for a complete list of options and usage details.
74
+
75
+
76
+ Some notes:
77
+
78
+ * The crawler may take a few minutes, depending on the number of specs it needs to crawl.
79
+ * The crawler uses a local cache for HTTP exchanges. It will create and fill a `.cache` subfolder in particular.
80
+ * If you cloned the repo instead of installing Reffy globally, replace `reffy` with `node reffy.js` in the above example to run Reffy.
81
+
82
+
83
+ ## Additional tools
84
+
85
+ Additional CLI tools in the `src/cli` folder complete the main specs crawler.
86
+
87
+
88
+ ### WebIDL parser
89
+
90
+ The **WebIDL parser** takes the relative path to an IDL extract and generates a JSON structure that describes WebIDL term definitions and references that the spec contains. The parser uses [WebIDL2](https://github.com/darobin/webidl2.js/) to parse the WebIDL content found in the spec. To run the WebIDL parser: `node src/cli/parse-webidl.js [idlfile]`
91
+
92
+ To create the WebIDL extract in the first place, you will need to run the `idl` module in Reffy, as in:
93
+
94
+ ```bash
95
+ reffy --spec fetch --module idl > fetch.idl
96
+ ```
97
+
98
+
99
+ ### Crawl results merger
100
+
101
+ The **crawl results merger** merges a new JSON crawl report into a reference one. This tool is typically useful to replace the crawl results of a given specification with the results of a new run of the crawler on that specification. To run the crawl results merger: `node src/cli/merge-crawl-results.js [new crawl report] [reference crawl report] [crawl report to create]`
102
+
103
+
104
+ ### Analysis tools
105
+
106
+ Starting with Reffy v5, analysis tools that used to be part of Reffy's suite of tools to study extracts and create human-readable reports of potential spec anomalies migrated to a companion tool named [Strudy](https://github.com/w3c/strudy). The actual reports get published in a separate [w3c/webref-analysis](https://github.com/w3c/webref-analysis) repository as well.
107
+
108
+
109
+ ### WebIDL terms explorer
110
+
111
+ See the related **[WebIDLPedia](https://dontcallmedom.github.io/webidlpedia)** project and its [repo](https://github.com/dontcallmedom/webidlpedia).
112
+
113
+
114
+ ## Technical notes
115
+
116
+ Reffy should be able to parse most of the W3C/WHATWG specifications that define CSS and/or WebIDL terms (both published versions and Editor's Drafts), and more generally speaking specs authored with one of [Bikeshed](https://tabatkins.github.io/bikeshed/) or [ReSpec](https://respec.org/docs/). Reffy can also parse certain IETF specs to some extent, and may work with other types of specs as well.
117
+
118
+ ### List of specs to crawl
119
+
120
+ Reffy crawls specs defined in [w3c/browser-specs](https://github.com/w3c/browser-specs/). If you believe a spec is missing, please check the [Spec selection criteria](https://github.com/w3c/browser-specs/#spec-selection-criteria) and create an issue (or prepare a pull request) against the [w3c/browser-specs](https://github.com/w3c/browser-specs/) repository.
121
+
122
+ ### Crawling a spec
123
+
124
+ Given some spec info, the crawler basically goes through the following steps:
125
+
126
+ 1. Load the URL through Puppeteer.
127
+ 2. If the document contains a "head" section that includes a link whose label looks like "single page", go back to step 2 and load the target of that link instead. This makes the crawler load the single page version of multi-page specifications such as HTML5.
128
+ 3. If the document is a multi-page spec without a "single page" version, load the individual subpages and add their content to the bottom of the first page to create a single page version.
129
+ 4. If the document uses ReSpec, let ReSpec finish its generation work.
130
+ 5. Run internal tools on the generated document to build the relevant information.
131
+
132
+ The crawler processes 4 specifications at a time. Network and parsing errors should be reported in the crawl results.
133
+
134
+ ### Config parameters
135
+
136
+ The crawler reads parameters from the `config.json` file. Optional parameters:
137
+
138
+ * `cacheRefresh`: set this flag to `never` to tell the crawler to use the cache entry for a URL directly, instead of sending a conditional HTTP request to check whether the entry is still valid. This parameter is typically useful when developing Reffy's code to work offline.
139
+ * `resetCache`: set this flag to `true` to tell the crawler to reset the contents of the local cache when it starts.
140
+
141
+
142
+ ## Contributing
143
+
144
+ Authors so far are [François Daoust](https://github.com/tidoust/) and [Dominique Hazaël-Massieux](https://github.com/dontcallmedom/).
145
+
146
+ Additional ideas, bugs and/or code contributions are most welcome. Create [issues on GitHub](https://github.com/w3c/reffy/issues) as needed!
147
+
148
+
149
+ ## Licensing
150
+
151
+ The code is available under an [MIT license](LICENSE).
package/index.js CHANGED
@@ -1,9 +1,9 @@
1
- module.exports = {
2
- parseIdl: require("./src/cli/parse-webidl").parse,
3
- crawlSpecs: require("./src/lib/specs-crawler").crawlSpecs,
4
- expandCrawlResult: require("./src/lib/util").expandCrawlResult,
5
- mergeCrawlResults: require("./src/lib/util").mergeCrawlResults,
6
- isLatestLevelThatPasses: require("./src/lib/util").isLatestLevelThatPasses,
7
- getInterfaceTreeInfo: require("./src/lib/util").getInterfaceTreeInfo,
8
- postProcessor: require("./src/lib/post-processor")
9
- };
1
+ module.exports = {
2
+ parseIdl: require("./src/cli/parse-webidl").parse,
3
+ crawlSpecs: require("./src/lib/specs-crawler").crawlSpecs,
4
+ expandCrawlResult: require("./src/lib/util").expandCrawlResult,
5
+ mergeCrawlResults: require("./src/lib/util").mergeCrawlResults,
6
+ isLatestLevelThatPasses: require("./src/lib/util").isLatestLevelThatPasses,
7
+ getInterfaceTreeInfo: require("./src/lib/util").getInterfaceTreeInfo,
8
+ postProcessor: require("./src/lib/post-processor")
9
+ };
package/package.json CHANGED
@@ -1,53 +1,53 @@
1
- {
2
- "name": "reffy",
3
- "version": "9.1.2",
4
- "description": "W3C/WHATWG spec dependencies exploration companion. Features a short set of tools to study spec references as well as WebIDL term definitions and references found in W3C specifications.",
5
- "repository": {
6
- "type": "git",
7
- "url": "https://github.com/w3c/reffy.git"
8
- },
9
- "files": [
10
- "index.js",
11
- "reffy.js",
12
- "src/"
13
- ],
14
- "bugs": {
15
- "url": "https://github.com/w3c/reffy/issues"
16
- },
17
- "author": {
18
- "name": "tidoust",
19
- "email": "fd@w3.org"
20
- },
21
- "contributors": [
22
- {
23
- "name": "dontcallmedom",
24
- "email": "dom@w3.org"
25
- }
26
- ],
27
- "license": "MIT",
28
- "engines": {
29
- "node": ">=14"
30
- },
31
- "main": "index.js",
32
- "bin": "./reffy.js",
33
- "dependencies": {
34
- "abortcontroller-polyfill": "1.7.3",
35
- "commander": "9.4.0",
36
- "fetch-filecache-for-crawling": "4.1.0",
37
- "puppeteer": "17.0.0",
38
- "semver": "^7.3.5",
39
- "web-specs": "2.23.0",
40
- "webidl2": "24.2.2"
41
- },
42
- "devDependencies": {
43
- "chai": "4.3.6",
44
- "mocha": "10.0.0",
45
- "nock": "13.2.9",
46
- "respec": "32.2.3",
47
- "respec-hljs": "2.1.1",
48
- "rollup": "2.79.0"
49
- },
50
- "scripts": {
51
- "test": "mocha --recursive tests/"
52
- }
53
- }
1
+ {
2
+ "name": "reffy",
3
+ "version": "10.0.0",
4
+ "description": "W3C/WHATWG spec dependencies exploration companion. Features a short set of tools to study spec references as well as WebIDL term definitions and references found in W3C specifications.",
5
+ "repository": {
6
+ "type": "git",
7
+ "url": "https://github.com/w3c/reffy.git"
8
+ },
9
+ "files": [
10
+ "index.js",
11
+ "reffy.js",
12
+ "src/"
13
+ ],
14
+ "bugs": {
15
+ "url": "https://github.com/w3c/reffy/issues"
16
+ },
17
+ "author": {
18
+ "name": "tidoust",
19
+ "email": "fd@w3.org"
20
+ },
21
+ "contributors": [
22
+ {
23
+ "name": "dontcallmedom",
24
+ "email": "dom@w3.org"
25
+ }
26
+ ],
27
+ "license": "MIT",
28
+ "engines": {
29
+ "node": ">=14"
30
+ },
31
+ "main": "index.js",
32
+ "bin": "./reffy.js",
33
+ "dependencies": {
34
+ "abortcontroller-polyfill": "1.7.3",
35
+ "commander": "9.4.0",
36
+ "fetch-filecache-for-crawling": "4.1.0",
37
+ "puppeteer": "17.1.1",
38
+ "semver": "^7.3.5",
39
+ "web-specs": "2.23.0",
40
+ "webidl2": "24.2.2"
41
+ },
42
+ "devDependencies": {
43
+ "chai": "4.3.6",
44
+ "mocha": "10.0.0",
45
+ "nock": "13.2.9",
46
+ "respec": "32.2.3",
47
+ "respec-hljs": "2.1.1",
48
+ "rollup": "2.79.0"
49
+ },
50
+ "scripts": {
51
+ "test": "mocha --recursive tests/"
52
+ }
53
+ }