occam-custom-grammars 5.0.1228 → 5.0.1229
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- package/example.js +250 -48
- package/lib/constants.js +9 -1
- package/lib/customGrammar/combined.js +23 -6
- package/lib/customGrammar.js +2 -19
- package/lib/example/customGrammar/userDefined1.js +2 -2
- package/lib/example/view.js +5 -5
- package/lib/typesMap.js +45 -0
- package/lib/utilities/bnf.js +53 -5
- package/lib/utilities/grammar.js +31 -0
- package/lib/utilities/nominal.js +25 -0
- package/lib/utilities/vocabulary.js +8 -16
- package/package.json +2 -2
- package/src/constants.js +2 -0
- package/src/customGrammar/combined.js +30 -5
- package/src/customGrammar.js +1 -23
- package/src/example/customGrammar/userDefined1.js +5 -1
- package/src/example/view.js +5 -5
- package/src/typesMap.js +50 -0
- package/src/utilities/bnf.js +79 -3
- package/src/utilities/grammar.js +12 -0
- package/src/utilities/nominal.js +9 -0
- package/src/utilities/vocabulary.js +5 -22
package/lib/utilities/vocabulary.js
CHANGED

@@ -16,31 +16,23 @@ _export(exports, {
         return validateVocabulary;
     }
 });
-var _occamlexers = require("occam-lexers");
-var _occamgrammars = require("occam-grammars");
 var _necessary = require("necessary");
 var _query = require("../utilities/query");
+var _nominal = require("../utilities/nominal");
 var _constants = require("../constants");
-var
-var nominalLexer = _occamlexers.CommonLexer.fromNothing(_occamgrammars.NominalLexer), customGrammarVocabularyLexer = _occamgrammars.CustomGrammarVocabularyLexer.fromNothing(), customGrammarVocabularyParser = _occamgrammars.CustomGrammarVocabularyParser.fromNothing();
+var _grammar = require("../utilities/grammar");
 var first = _necessary.arrayUtilities.first, second = _necessary.arrayUtilities.second;
+var expressionNodesQuery = (0, _query.nodesQuery)("//expression");
 function validateVocabulary(vocabulary) {
     if (vocabulary === null || vocabulary === _constants.EMPTY_STRING) {
         return;
     }
-    var content = vocabulary, tokens = customGrammarVocabularyLexer.tokenise(content), node = customGrammarVocabularyParser.parse(tokens);
-    if (node === null) {
-        throw new Error("The vocabulary cannot be parsed.");
-    }
-    var errorNodes = errorNodesQuery(node), errorNodesLength = errorNodes.length;
-    if (errorNodesLength > 0) {
-        throw new Error("The vocabulary contains errors.");
-    }
+    var content = vocabulary, tokens = _grammar.customGrammarVocabularyLexer.tokenise(content), node = _grammar.customGrammarVocabularyParser.parse(tokens);
    var expressionNodes = expressionNodesQuery(node);
    expressionNodes.forEach(function(expressionNode) {
-        var content = contentFromExpressionNode(expressionNode), tokens = nominalLexer.tokenise(content), tokensLength = tokens.length;
+        var content = contentFromExpressionNode(expressionNode), tokens = _nominal.nominalLexer.tokenise(content), tokensLength = tokens.length;
        if (tokensLength > 1) {
-            throw new Error("Tokenising '".concat(content, "' results in more than one token."));
+            throw new Error("Tokenising the '".concat(content, "' content results in more than one token."));
        }
        var firstToken = first(tokens), token = firstToken, type = token.getType();
        if (type !== _constants.UNASSIGNED_TYPE) {
@@ -55,7 +47,7 @@ function expressionsFromVocabulary(vocabulary, expressions) {
    if (vocabulary === null || vocabulary === _constants.EMPTY_STRING) {
        return;
    }
-    var content = vocabulary, tokens = customGrammarVocabularyLexer.tokenise(content), node = customGrammarVocabularyParser.parse(tokens), expressionNodes = expressionNodesQuery(node);
+    var content = vocabulary, tokens = _grammar.customGrammarVocabularyLexer.tokenise(content), node = _grammar.customGrammarVocabularyParser.parse(tokens), expressionNodes = expressionNodesQuery(node);
    expressionNodes.forEach(function(expressionNode) {
        var content = contentFromExpressionNode(expressionNode), expression = content; ///
        expressions.push(expression);
@@ -66,4 +58,4 @@ function contentFromExpressionNode(expressionNode) {
    return content;
 }
 
-//# sourceMappingURL=data:application/json;base64,
+//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy91dGlsaXRpZXMvdm9jYWJ1bGFyeS5qcyJd…
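The change above is a deduplication: the lexer and parser instances that this module previously constructed for itself now come from the shared ../utilities/grammar and ../utilities/nominal modules. A minimal, self-contained sketch of that pattern follows; the Lexer and Parser classes here are stand-ins, not the occam API.

// grammar.js (sketch): build the expensive instances once, at module load.
class Lexer {
  tokenise(content) { return content.split(/\s+/).filter(Boolean); }
}

class Parser {
  parse(tokens) { return { tokens }; }
}

const lexer = new Lexer(); // every require("./grammar") sees this same instance
const parser = new Parser();

module.exports = { lexer, parser };

// consumer.js (sketch): instead of constructing its own lexer and parser,
// a consumer now requires the shared ones.
// const { lexer, parser } = require("./grammar");
// const node = parser.parse(lexer.tokenise("a b c"));

Because Node caches modules, all consumers share one lexer and one parser, which is what the new _grammar and _nominal requires in the compiled output achieve.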
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "occam-custom-grammars",
   "author": "James Smith",
-  "version": "5.0.1228",
+  "version": "5.0.1229",
   "license": "MIT, Anti-996",
   "homepage": "https://github.com/djalbat/occam-custom-grammars",
   "description": "Occam's custom grammars.",
@@ -14,7 +14,7 @@
   "occam-grammar-utilities": "^8.0.345",
   "occam-grammars": "^1.3.311",
   "occam-lexers": "^23.1.1",
-  "occam-parsers": "^23.1.
+  "occam-parsers": "^23.1.3",
   "occam-query": "^4.1.109"
 },
 "devDependencies": {
package/src/customGrammar/combined.js
CHANGED

@@ -6,7 +6,9 @@ import { eliminateLeftRecursion } from "occam-grammar-utilities";
 
 import defaultCustomGrammar from "../customGrammar/default";
 
+import { validateBNF } from "../utilities/bnf";
 import { EMPTY_STRING, VERTICAL_BAR } from "../constants";
+import { TERM_RULE_NAME, STATEMENT_RULE_NAME } from "../ruleNames";
 import { validateVocabulary, expressionsFromVocabulary } from "../utilities/vocabulary";
 import { TYPE_VOCABULARY_NAME, SYMBOL_VOCABULARY_NAME } from "../vocabularyNames";
 
@@ -45,7 +47,7 @@ export default class CombinedCustomGrammar {
      customGrammars = [ defaultCustomGrammar, ...customGrammars ]; ///
    }
 
-    const rules =
+    const rules = rulesFromCustomGrammars(customGrammars),
          entries = entriesFromCustomGrammars(customGrammars),
          combinedCustomGrammar = new CombinedCustomGrammar(rules, entries);
 
@@ -57,7 +59,7 @@ export default class CombinedCustomGrammar {
      customGrammars = [ defaultCustomGrammar, ...customGrammars ]; ///
    }
 
-    const rules =
+    const rules = rulesFromCustomGrammars(customGrammars),
          entries = entriesFromCustomGrammars(customGrammars),
          combinedCustomGrammar = new CombinedCustomGrammar(rules, entries);
 
@@ -65,9 +67,13 @@ export default class CombinedCustomGrammar {
  }
 }
 
-function
-const
-
+function rulesFromCustomGrammars(customGrammars) {
+  const ruleNames = [
+    TERM_RULE_NAME,
+    STATEMENT_RULE_NAME,
+  ],
+        bnfs = ruleNames.map((ruleName) => {
+          const bnf = bnfFromCustomGrammars(customGrammars, ruleName);
 
          return bnf;
        }),
@@ -93,6 +99,25 @@ function entriesFromCustomGrammars(customGrammars) {
 
  return entries;
 }
 
+function bnfFromCustomGrammars(customGrammars, ruleName) {
+  const bnfs = [];
+
+  backwardsForEach(customGrammars, (customGrammar) => {
+    const bnf = customGrammar.getBNF(ruleName),
+          customGrammarDefaultCustomGrammar = customGrammar.isDefaultCustomGrammar();
+
+    if (!customGrammarDefaultCustomGrammar) {
+      validateBNF(bnf, ruleName);
+    }
+
+    bnfs.push(bnf);
+  });
+
+  const bnf = bnfs.join(EMPTY_STRING);
+
+  return bnf;
+}
+
 function entryFromCustomGrammars(customGrammars, vocabularyName) {
  const expressions = [];
 
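The new bnfFromCustomGrammars above walks the grammars in reverse, validates the BNF fragment of every grammar except the default one, and concatenates the fragments. A self-contained sketch of that flow, with plain objects standing in for CustomGrammar instances and a no-op validator standing in for the real validateBNF:

const EMPTY_STRING = "";

// Same contract as the backwardsForEach helper the diff relies on:
// visit the array's elements from last to first.
function backwardsForEach(array, callback) {
  for (let index = array.length - 1; index >= 0; index--) {
    callback(array[index]);
  }
}

// Stand-in for the real validateBNF: the real one parses the fragment and
// checks its token types; this one only mirrors the null/empty early return.
function validateBNF(bnf, ruleName) {
  if ((bnf === null) || (bnf === EMPTY_STRING)) {
    return;
  }
}

function bnfFromCustomGrammars(customGrammars, ruleName) {
  const bnfs = [];

  backwardsForEach(customGrammars, (customGrammar) => {
    const bnf = customGrammar.getBNF(ruleName);

    if (!customGrammar.isDefaultCustomGrammar()) {
      validateBNF(bnf, ruleName); // throws on a bad user-defined fragment
    }

    bnfs.push(bnf);
  });

  return bnfs.join(EMPTY_STRING);
}

const defaultGrammar = { getBNF: () => "term ::= primitive ;\n", isDefaultCustomGrammar: () => true },
      userGrammar = { getBNF: () => 'term ::= [type] "stuff" ;\n', isDefaultCustomGrammar: () => false },
      customGrammars = [ defaultGrammar, userGrammar ]; // the default is prepended upstream

console.log(bnfFromCustomGrammars(customGrammars, "term"));
// Visiting backwards means the user-defined fragment lands before the default one.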
package/src/customGrammar.js
CHANGED

@@ -34,34 +34,12 @@ export default class CustomGrammar {
    return this.symbolVocabulary;
  }
 
-  getBNF(ruleName
+  getBNF(ruleName) {
    let bnf;
 
    switch (ruleName) {
      case TERM_RULE_NAME: bnf = this.termBNF; break;
      case STATEMENT_RULE_NAME: bnf = this.statementBNF; break;
-
-      default: {
-        const ruleNames = [
-          TERM_RULE_NAME,
-          STATEMENT_RULE_NAME
-        ],
-        combinedBNF = ruleNames.reduce((combinedBNF, ruleName) => {
-          const bnf = this.getBNF(ruleName);
-
-          if (bnf !== EMPTY_STRING) {
-            combinedBNF = `${combinedBNF}
-
-${bnf}`;
-          }
-
-          return combinedBNF;
-        }, EMPTY_STRING);
-
-        bnf = combinedBNF; ///
-
-        break;
-      }
    }
 
    return bnf;
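The deleted default branch made getBNF fall back to concatenating the term and statement BNFs when the rule name was unrecognised; after this change an unrecognised rule name simply yields undefined, and combining is handled by rulesFromCustomGrammars in combined.js instead. A behaviour sketch with stand-in values, not the occam API:

const TERM_RULE_NAME = "term",
      STATEMENT_RULE_NAME = "statement";

// Mirrors the simplified method: a plain two-case switch with no default.
function getBNF(ruleName, termBNF, statementBNF) {
  let bnf;

  switch (ruleName) {
    case TERM_RULE_NAME: bnf = termBNF; break;
    case STATEMENT_RULE_NAME: bnf = statementBNF; break;
  }

  return bnf;
}

console.log(getBNF("term", "term ::= ... ;", ""));  // "term ::= ... ;"
console.log(getBNF("other", "term ::= ... ;", "")); // undefined: no combined fallback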
package/src/example/customGrammar/userDefined1.js
CHANGED

@@ -5,7 +5,11 @@ import { CustomGrammar } from "../../index"; ///
 import { USER_DEFINED_CUSTOM_GRAMMAR_NAME_1 } from "../grammarNames";
 
 const name = USER_DEFINED_CUSTOM_GRAMMAR_NAME_1,
-      termBNF =
+      termBNF = `
+
+        term ::= [type] "stuff" ;
+
+      `,
      statementBNF = "",
      typeVocabulary = "",
      symbolVocabulary = "",
package/src/example/view.js
CHANGED

@@ -132,16 +132,16 @@ class View extends Element {
          Custom grammar
        </SubHeading>
        <NameSelect onChange={changeHandler} />
-        <SubHeading>
-          BNF
-        </SubHeading>
-        <RuleNameSelect onChange={changeHandler} />
-        <BNFTextarea onKeyUp={keyUpHandler} />
        <SubHeading>
          Vocabulary
        </SubHeading>
        <VocabularyNameSelect onChange={changeHandler} />
        <VocabularyTextarea onKeyUp={keyUpHandler} />
+        <SubHeading>
+          BNF
+        </SubHeading>
+        <RuleNameSelect onChange={changeHandler} />
+        <BNFTextarea onKeyUp={keyUpHandler} />
        <SubHeading>
          Start rule
        </SubHeading>
package/src/typesMap.js
ADDED

@@ -0,0 +1,50 @@
+"use strict";
+
+import { arrayUtilities } from "necessary";
+
+import { nominalParser } from "./utilities/nominal";
+import { TERM_RULE_NAME, STATEMENT_RULE_NAME } from "./ruleNames";
+import { STUFF_RULE_NAME, NONSENSE_RULE_NAME } from "./constants";
+
+const { first } = arrayUtilities;
+
+const ruleMap = nominalParser.getRuleMap(),
+      stuffRule = ruleMap[STUFF_RULE_NAME],
+      nonsenseRule = ruleMap[NONSENSE_RULE_NAME],
+      stuffTypes = typesFromRule(stuffRule),
+      nonsenseTypes = typesFromRule(nonsenseRule),
+      termTypes = stuffTypes, ///
+      statementTypes = nonsenseTypes,
+      typesMap = {
+        [TERM_RULE_NAME]: termTypes,
+        [STATEMENT_RULE_NAME]: statementTypes
+      };
+
+export default typesMap;
+
+function typesFromRule(rule) {
+  let parts;
+
+  const definitions = rule.getDefinitions(),
+        firstDDefinition = first(definitions),
+        definition = firstDDefinition; ///
+
+  parts = definition.getParts();
+
+  const firstPart = first(parts),
+        oneOrMorePartsPart = firstPart, ///
+        part = oneOrMorePartsPart.getPart(),
+        choiceOrPartsPart = part; ///
+
+  parts = choiceOrPartsPart.getParts();
+
+  const types = parts.map((part) => {
+    const significantTokenTypePart = part, ///
+          significantTokenType = significantTokenTypePart.getSignificantTokenType(),
+          type = significantTokenType; ///
+
+    return type;
+  });
+
+  return types;
+}
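typesFromRule above relies on a fixed shape for the two nominal rules: a rule has definitions, and the first definition's first part is a one-or-more part wrapping a choice of significant token types. A toy version of the same walk, over hand-built objects whose get* methods mimic (but are not) the occam-parsers rule API:

// A stand-in rule shaped the way typesFromRule expects:
// rule -> first definition -> first part -> wrapped choice -> token types.
const rule = {
  getDefinitions: () => [ {
    getParts: () => [ {
      getPart: () => ({ // the one-or-more part wraps a choice-of-parts part
        getParts: () => [
          { getSignificantTokenType: () => "type" },
          { getSignificantTokenType: () => "name" }
        ]
      })
    } ]
  } ]
};

function typesFromRule(rule) {
  const [ definition ] = rule.getDefinitions(),
        [ oneOrMorePartsPart ] = definition.getParts(),
        choiceOfPartsPart = oneOrMorePartsPart.getPart(),
        parts = choiceOfPartsPart.getParts();

  return parts.map((part) => part.getSignificantTokenType());
}

console.log(typesFromRule(rule)); // [ "type", "name" ]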
package/src/utilities/bnf.js
CHANGED

@@ -1,12 +1,88 @@
 "use strict";
 
+import { arrayUtilities } from "necessary";
 
-import
+import typesMap from "../typesMap";
 
-
-
+import { nodesQuery } from "../utilities/query";
+import { nominalLexer } from "../utilities/nominal";
+import { EMPTY_STRING, UNDERSCORE_CHARACTER } from "../constants";
+import { customGrammarBNFLexer, customGrammarBNFParser } from "../utilities/grammar";
+
+const { first, second } = arrayUtilities;
+
+const stringLiteralTerminalNodesQuery = nodesQuery("//stringLiteral/@*!"),
+      significantTokenTypeTerminalNodesQuery = nodesQuery("//significantTokenType/@*!");
+
+export function validateBNF(bnf, ruleName) {
+  if ((bnf === null) || (bnf === EMPTY_STRING)) {
    return;
  }
 
+  const content = bnf,
+        tokens = customGrammarBNFLexer.tokenise(content),
+        node = customGrammarBNFParser.parse(tokens);
+
+  const types = typesMap[ruleName],
+        significantTokenTypeTerminalNodes = significantTokenTypeTerminalNodesQuery(node);
+
+  significantTokenTypeTerminalNodes.forEach((significantTokenTypeTerminalNode) => {
+    const type = typeFromSignificantTokenTypeTerminalNode(significantTokenTypeTerminalNode),
+          typesIncludeType = types.includes(type);
+
+    if (!typesIncludeType) {
+      throw new Error(`The '${type}' type is not included in the '${ruleName}' rule's types.`)
+    }
+  });
+
+  const stringLiteralTerminalNodes = stringLiteralTerminalNodesQuery(node);
+
+  stringLiteralTerminalNodes.forEach((stringLiteralTerminalNode) => {
+    const content = contentFromStringLiteralTerminalNode(stringLiteralTerminalNode);
+
+    if (content === UNDERSCORE_CHARACTER) {
+      throw new Error(`The "${content}" string literal cannot be an underscore.`);
+    }
+
+    const tokens = nominalLexer.tokenise(content),
+          tokensLength = tokens.length;
+
+    if (tokensLength !== 1) {
+      throw new Error(`Tokenising the "${content}" string literal does not result in a single token.`);
+    }
+
+    const firstToken = first(tokens),
+          token = firstToken, ///
+          type = token.getType(),
+          typesIncludeType = types.includes(type);
+
+    if (!typesIncludeType) {
+      throw new Error(`The "${content}" string literal's token's '${type}' type is not included in the '${ruleName}' rule's types.`)
+    }
+  });
+}
+
+function contentFromStringLiteralTerminalNode(stringLiteralTerminalNode) {
+  let content;
+
+  content = stringLiteralTerminalNode.getContent();
+
+  const matches = content.match(/"([^"]+)"/),
+        secondMatch = second(matches);
+
+  content = secondMatch; ///
+
+  return content;
+}
+
+function typeFromSignificantTokenTypeTerminalNode(significantTokenTypeTerminalNode) {
+  let type;
+
+  const content = significantTokenTypeTerminalNode.getContent(),
+        matches = content.match(/\[([^\]]+)\]/),
+        secondMatch = second(matches);
+
+  type = secondMatch; ///
 
+  return type;
 }
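The two helper functions at the bottom recover the payload from raw terminal-node content: a string literal still carries its quotes and a significant token type its brackets, so they are unwrapped with the regular expressions /"([^"]+)"/ and /\[([^\]]+)\]/. Those extractions can be exercised on their own:

// The same regular expressions the new helpers use.
const second = (array) => array[1]; // same as necessary's arrayUtilities.second

const stringLiteralContent = '"stuff"',
      literal = second(stringLiteralContent.match(/"([^"]+)"/));

console.log(literal); // stuff

const significantTokenTypeContent = "[type]",
      type = second(significantTokenTypeContent.match(/\[([^\]]+)\]/));

console.log(type); // type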
package/src/utilities/grammar.js
ADDED

@@ -0,0 +1,12 @@
+"use strict";
+
+import { CustomGrammarBNFLexer, CustomGrammarBNFParser } from "occam-grammars";
+import { CustomGrammarVocabularyLexer, CustomGrammarVocabularyParser } from "occam-grammars";
+
+export const customGrammarBNFLexer = CustomGrammarBNFLexer.fromNothing();
+
+export const customGrammarBNFParser = CustomGrammarBNFParser.fromNothing();
+
+export const customGrammarVocabularyLexer = CustomGrammarVocabularyLexer.fromNothing();
+
+export const customGrammarVocabularyParser = CustomGrammarVocabularyParser.fromNothing();
package/src/utilities/nominal.js
ADDED

@@ -0,0 +1,9 @@
+"use strict";
+
+import { CommonLexer } from "occam-lexers";
+import { CommonParser } from "occam-parsers";
+import { NominalLexer, NominalParser } from "occam-grammars";
+
+export const nominalLexer = CommonLexer.fromNothing(NominalLexer);
+
+export const nominalParser = CommonParser.fromNothing(NominalParser);
package/src/utilities/vocabulary.js
CHANGED

@@ -1,22 +1,16 @@
 "use strict";
 
-import { CommonLexer } from "occam-lexers";
-import { NominalLexer } from "occam-grammars";
 import { arrayUtilities } from "necessary";
-import { CustomGrammarVocabularyLexer, CustomGrammarVocabularyParser } from "occam-grammars";
 
 import { nodesQuery } from "../utilities/query";
+import { nominalLexer } from "../utilities/nominal";
 import { EMPTY_STRING, UNASSIGNED_TYPE, UNDERSCORE_CHARACTER } from "../constants";
-
-const errorNodesQuery = nodesQuery("//error"),
-      expressionNodesQuery = nodesQuery("//expression")
-
-const nominalLexer = CommonLexer.fromNothing(NominalLexer),
-      customGrammarVocabularyLexer = CustomGrammarVocabularyLexer.fromNothing(),
-      customGrammarVocabularyParser = CustomGrammarVocabularyParser.fromNothing();
+import { customGrammarVocabularyLexer, customGrammarVocabularyParser } from "../utilities/grammar"
 
 const { first, second } = arrayUtilities;
 
+const expressionNodesQuery = nodesQuery("//expression")
+
 export function validateVocabulary(vocabulary) {
  if ((vocabulary === null) || (vocabulary === EMPTY_STRING)) {
    return;
@@ -26,17 +20,6 @@ export function validateVocabulary(vocabulary) {
        tokens = customGrammarVocabularyLexer.tokenise(content),
        node = customGrammarVocabularyParser.parse(tokens);
 
-  if (node === null) {
-    throw new Error("The vocabulary cannot be parsed.");
-  }
-
-  const errorNodes = errorNodesQuery(node),
-        errorNodesLength = errorNodes.length;
-
-  if (errorNodesLength > 0) {
-    throw new Error("The vocabulary contains errors.");
-  }
-
  const expressionNodes = expressionNodesQuery(node);
 
  expressionNodes.forEach((expressionNode) => {
@@ -45,7 +28,7 @@ export function validateVocabulary(vocabulary) {
        tokensLength = tokens.length;
 
  if (tokensLength > 1) {
-    throw new Error(`Tokenising '${content}' results in more than one token.`);
+    throw new Error(`Tokenising the '${content}' content results in more than one token.`);
  }
 
  const firstToken = first(tokens),
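The check in this last hunk (begun in the file's first hunk above) tokenises each vocabulary entry with the nominal lexer and requires exactly one token whose type is still unassigned. A self-contained sketch of that invariant; the toy lexer and token objects are stand-ins, not the occam API:

const UNASSIGNED_TYPE = "unassigned",
      KEYWORDS = new Set([ "term", "statement" ]);

// Toy lexer: whitespace-separated words; anything that is not a known
// keyword gets the unassigned type.
const nominalLexer = {
  tokenise: (content) =>
    content.split(/\s+/).filter(Boolean).map((value) => ({
      getType: () => KEYWORDS.has(value) ? "keyword" : UNASSIGNED_TYPE
    }))
};

function checkVocabularyEntry(content) {
  const tokens = nominalLexer.tokenise(content),
        tokensLength = tokens.length;

  if (tokensLength > 1) {
    throw new Error(`Tokenising the '${content}' content results in more than one token.`);
  }

  const [ token ] = tokens,
        type = token.getType();

  if (type !== UNASSIGNED_TYPE) {
    throw new Error(`The '${type}' type of the '${content}' token is not 'unassigned'.`);
  }
}

checkVocabularyEntry("foo");       // fine: one unassigned token
// checkVocabularyEntry("a b");    // throws: more than one token
// checkVocabularyEntry("term");   // throws: keyword, not unassigned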