@bhanquier/template-core 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs ADDED
@@ -0,0 +1,160 @@
1
"use strict";

// ---- Bundler interop helpers (esbuild-style CommonJS prelude) ----
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;

// Define a lazy, enumerable getter on `target` for every entry in `all`.
var __export = (target, all) => {
  for (var name in all) {
    __defProp(target, name, { get: all[name], enumerable: true });
  }
};

// Copy own properties of `from` onto `to` as forwarding getters, skipping
// `except` and anything `to` already owns; enumerability is preserved.
var __copyProps = (to, from, except, desc) => {
  if (from && (typeof from === "object" || typeof from === "function")) {
    for (const key of __getOwnPropNames(from)) {
      if (!__hasOwnProp.call(to, key) && key !== except) {
        desc = __getOwnPropDesc(from, key);
        __defProp(to, key, { get: () => from[key], enumerable: !desc || desc.enumerable });
      }
    }
  }
  return to;
};

// Wrap a module namespace object so `require()` consumers see `__esModule`.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts — public API surface of the package.
var index_exports = {};
__export(index_exports, {
  computeRequiredCoverage: () => computeRequiredCoverage,
  lexicalSimilarityScore: () => lexicalSimilarityScore,
  mergeTokens: () => mergeTokens,
  normalizeFreeText: () => normalizeFreeText,
  normalizeTokenValue: () => normalizeTokenValue,
  suggestFieldByLexicalRules: () => suggestFieldByLexicalRules,
  tokenKey: () => tokenKey,
  tokenize: () => tokenize
});
module.exports = __toCommonJS(index_exports);
33
+
34
// src/token-normalization.ts

/**
 * Collapse internal whitespace runs to single spaces and strip the ends.
 */
function normalizeTokenValue(value) {
  const trimmed = value.trim();
  return trimmed.replace(/\s+/g, " ");
}

/**
 * Stable identity for a token: "<syntax>:<normalized raw value>".
 */
function tokenKey(syntax, rawToken) {
  const normalized = normalizeTokenValue(rawToken);
  return `${syntax}:${normalized}`;
}

/**
 * Deduplicate tokens by key, summing occurrence counts, then order the
 * result by descending occurrences with the token key as a tie-breaker.
 * Input objects are never mutated; stored entries are normalized copies.
 */
function mergeTokens(tokens) {
  const byKey = new Map();
  for (const token of tokens) {
    const key = tokenKey(token.syntax, token.rawToken);
    const previous = byKey.get(key);
    if (previous === undefined) {
      byKey.set(key, { ...token, rawToken: normalizeTokenValue(token.rawToken) });
    } else {
      previous.occurrences += token.occurrences;
    }
  }
  const result = Array.from(byKey.values());
  result.sort((left, right) => {
    const delta = right.occurrences - left.occurrences;
    if (delta !== 0) return delta;
    const leftKey = tokenKey(left.syntax, left.rawToken);
    const rightKey = tokenKey(right.syntax, right.rawToken);
    return leftKey.localeCompare(rightKey);
  });
  return result;
}
60
+
61
// src/mapping-progress.ts

/**
 * Summarize how many of the required field names are covered by active
 * (non-ignored) mappings. Returns totals, a 0-100 rounded coverage score
 * (100 when there are no required fields), the list of still-missing
 * required names, and a `complete` flag.
 */
function computeRequiredCoverage(mappings, requiredFieldNames) {
  const mappedFields = new Set();
  for (const mapping of mappings) {
    // Ignored rows and rows without a target field do not count as coverage.
    if (mapping.status !== "ignored" && mapping.mappedField) {
      mappedFields.add(mapping.mappedField);
    }
  }
  const missingRequired = requiredFieldNames.filter((name) => !mappedFields.has(name));
  const requiredTotal = requiredFieldNames.length;
  const requiredMapped = requiredTotal - missingRequired.length;
  let coverageScore = 100;
  if (requiredTotal > 0) {
    coverageScore = Math.round(requiredMapped / requiredTotal * 100);
  }
  return {
    requiredTotal,
    requiredMapped,
    coverageScore,
    missingRequired,
    complete: missingRequired.length === 0
  };
}
78
+
79
// src/lexical.ts

/**
 * Normalize free text for lexical comparison: decompose accents (NFD) and
 * strip combining marks, lowercase, collapse every non-alphanumeric run into
 * a single space, and trim. Result is lowercase ASCII words joined by spaces.
 */
function normalizeFreeText(value) {
  return value.normalize("NFD").replace(/[\u0300-\u036f]/g, "").toLowerCase().replace(/[^a-z0-9]+/g, " ").trim();
}

/**
 * Split normalized text on spaces, keeping only tokens of length >= 2
 * (single characters carry too little signal for matching).
 */
function tokenize(value) {
  return value.split(" ").map((token) => token.trim()).filter((token) => token.length >= 2);
}

/**
 * Similarity in [0, 1] between a normalized raw string and a candidate:
 *   0.98 exact match, 0.97 canonicalized match, 0.75 substring containment,
 *   otherwise the Jaccard overlap of the token sets (0 when both are empty).
 *
 * @param {string} normalizedRaw - normalizeFreeText() of the raw text.
 * @param {Set<string>} rawTokens - tokenize(normalizedRaw), precomputed by caller.
 * @param {string} normalizedCandidate - normalizeFreeText() of the candidate.
 * @param {((s: string) => string) | undefined} canonicalize - optional canonical form.
 */
function lexicalSimilarityScore(normalizedRaw, rawTokens, normalizedCandidate, canonicalize) {
  if (!normalizedCandidate) return 0;
  if (normalizedRaw === normalizedCandidate) return 0.98;
  if (canonicalize && canonicalize(normalizedRaw) === canonicalize(normalizedCandidate)) return 0.97;
  if (normalizedCandidate.includes(normalizedRaw) || normalizedRaw.includes(normalizedCandidate)) {
    return 0.75;
  }
  const candidateTokens = new Set(tokenize(normalizedCandidate));
  const intersection = new Set(Array.from(rawTokens).filter((t) => candidateTokens.has(t))).size;
  const union = new Set([...rawTokens, ...candidateTokens]).size;
  if (union === 0) return 0;
  return intersection / union;
}

// src/suggestion.ts

/**
 * Suggest a target field for a raw token using, in priority order:
 *   1. the caller's exact-field resolver (confidence 1),
 *   2. caller-supplied keyword regex rules,
 *   3. lexical similarity against candidate field names/labels/aliases,
 *      gated by `minimumScore` (default 0.45) and mapped into a
 *      conservative [0.5, 0.85] confidence band.
 * Returns { field, confidence, reason }; all three are null when no
 * suggestion clears the bar.
 *
 * @param {{ rawToken: string,
 *           resolveExactField: (raw: string) => string | null,
 *           keywordRules: { pattern: RegExp, field: string, confidence: number, reason: string }[],
 *           candidateFields: { name: string, label?: string, aliases?: string[] }[],
 *           canonicalizeFieldName?: (s: string) => string,
 *           minimumScore?: number }} input
 */
function suggestFieldByLexicalRules(input) {
  const exact = input.resolveExactField(input.rawToken);
  if (exact) {
    return {
      field: exact,
      confidence: 1,
      reason: "Correspondance exacte"
    };
  }
  const normalizedText = normalizeFreeText(input.rawToken);
  if (!normalizedText) {
    return { field: null, confidence: null, reason: null };
  }
  for (const rule of input.keywordRules) {
    // Bug fix: a /g or /y pattern advances lastIndex on every .test() call,
    // so a rule that matched once could silently fail on the next token.
    // Reset it so each rule is evaluated from the start of the text.
    rule.pattern.lastIndex = 0;
    if (rule.pattern.test(normalizedText)) {
      return { field: rule.field, confidence: rule.confidence, reason: rule.reason };
    }
  }
  const rawTokens = new Set(tokenize(normalizedText));
  let bestField = null;
  let bestScore = 0;
  for (const field of input.candidateFields) {
    // Score the best of name, label, and any aliases for this field.
    const candidates = [field.name, field.label ?? "", ...field.aliases ?? []];
    const candidateScore = candidates.reduce((best, candidate) => {
      const normalizedCandidate = normalizeFreeText(candidate);
      const score = lexicalSimilarityScore(
        normalizedText,
        rawTokens,
        normalizedCandidate,
        input.canonicalizeFieldName
      );
      return Math.max(best, score);
    }, 0);
    if (candidateScore > bestScore) {
      bestScore = candidateScore;
      bestField = field.name;
    }
  }
  const minimumScore = input.minimumScore ?? 0.45;
  if (!bestField || bestScore < minimumScore) {
    return { field: null, confidence: null, reason: null };
  }
  // Similarity-based suggestions are never reported above 0.85 nor below 0.5.
  const confidence = Math.min(0.85, Math.max(0.5, bestScore + 0.15));
  return {
    field: bestField,
    confidence: Number(confidence.toFixed(2)),
    reason: "Similarit\xE9 lexicale"
  };
}
150
// Annotate the CommonJS export names for ESM import in node:
// NOTE: the `0 && (...)` expression is dead code at runtime; Node's
// cjs-module-lexer parses this exact pattern statically so that named
// `import { ... }` bindings work when this CJS file is imported from ESM.
// Do not reformat or remove it.
0 && (module.exports = {
  computeRequiredCoverage,
  lexicalSimilarityScore,
  mergeTokens,
  normalizeFreeText,
  normalizeTokenValue,
  suggestFieldByLexicalRules,
  tokenKey,
  tokenize
});
package/dist/index.js ADDED
@@ -0,0 +1,126 @@
1
// src/token-normalization.ts

/** Trim a raw token value and squeeze internal whitespace to single spaces. */
function normalizeTokenValue(value) {
  return value.trim().replace(/\s+/g, " ");
}

/** Canonical "<syntax>:<normalized token>" identity used for deduplication. */
function tokenKey(syntax, rawToken) {
  return `${syntax}:${normalizeTokenValue(rawToken)}`;
}

/**
 * Merge duplicate tokens (same syntax + normalized value) by summing their
 * occurrence counts, then sort by occurrences descending, breaking ties on
 * the token key. Caller-owned token objects are left untouched.
 */
function mergeTokens(tokens) {
  const accumulator = new Map();
  tokens.forEach((token) => {
    const key = tokenKey(token.syntax, token.rawToken);
    if (accumulator.has(key)) {
      accumulator.get(key).occurrences += token.occurrences;
    } else {
      accumulator.set(key, { ...token, rawToken: normalizeTokenValue(token.rawToken) });
    }
  });
  const compare = (a, b) =>
    b.occurrences - a.occurrences ||
    tokenKey(a.syntax, a.rawToken).localeCompare(tokenKey(b.syntax, b.rawToken));
  return Array.from(accumulator.values()).sort(compare);
}
27
+
28
// src/mapping-progress.ts

/**
 * Compute required-field coverage from the current mappings.
 * A field counts as covered when some mapping targets it and is not ignored.
 * Yields totals, a rounded 0-100 score (100 for an empty requirement list),
 * the names still missing, and whether coverage is complete.
 */
function computeRequiredCoverage(mappings, requiredFieldNames) {
  const active = mappings.filter((m) => m.status !== "ignored" && m.mappedField);
  const mappedFields = new Set(active.map((m) => m.mappedField));
  const missingRequired = requiredFieldNames.filter((name) => !mappedFields.has(name));
  const requiredTotal = requiredFieldNames.length;
  const requiredMapped = requiredTotal - missingRequired.length;
  const coverageScore =
    requiredTotal === 0 ? 100 : Math.round(requiredMapped / requiredTotal * 100);
  return {
    requiredTotal,
    requiredMapped,
    coverageScore,
    missingRequired,
    complete: missingRequired.length === 0
  };
}
45
+
46
// src/lexical.ts

/**
 * Normalize free text for matching: NFD-decompose and strip combining
 * diacritics, lowercase, turn every non-alphanumeric run into one space,
 * then trim. Output is lowercase ASCII words separated by single spaces.
 */
function normalizeFreeText(value) {
  return value.normalize("NFD").replace(/[\u0300-\u036f]/g, "").toLowerCase().replace(/[^a-z0-9]+/g, " ").trim();
}

/**
 * Tokenize normalized text on spaces; tokens shorter than 2 characters are
 * discarded as noise.
 */
function tokenize(value) {
  return value.split(" ").map((token) => token.trim()).filter((token) => token.length >= 2);
}

/**
 * Score in [0, 1] for how lexically close a candidate is to the raw text:
 * exact match 0.98, canonicalized match 0.97, substring containment 0.75,
 * else Jaccard similarity of token sets (0 when the union is empty).
 *
 * @param {string} normalizedRaw - normalizeFreeText() of the raw text.
 * @param {Set<string>} rawTokens - its token set, precomputed by the caller.
 * @param {string} normalizedCandidate - normalizeFreeText() of the candidate.
 * @param {((s: string) => string) | undefined} canonicalize
 */
function lexicalSimilarityScore(normalizedRaw, rawTokens, normalizedCandidate, canonicalize) {
  if (!normalizedCandidate) return 0;
  if (normalizedRaw === normalizedCandidate) return 0.98;
  if (canonicalize && canonicalize(normalizedRaw) === canonicalize(normalizedCandidate)) return 0.97;
  if (normalizedCandidate.includes(normalizedRaw) || normalizedRaw.includes(normalizedCandidate)) {
    return 0.75;
  }
  const candidateTokens = new Set(tokenize(normalizedCandidate));
  const intersection = new Set(Array.from(rawTokens).filter((t) => candidateTokens.has(t))).size;
  const union = new Set([...rawTokens, ...candidateTokens]).size;
  if (union === 0) return 0;
  return intersection / union;
}

// src/suggestion.ts

/**
 * Suggest a field for a raw token. Strategy order: exact resolver
 * (confidence 1) -> keyword regex rules -> best lexical-similarity match
 * over each candidate's name, label, and aliases, thresholded by
 * `minimumScore` (default 0.45) and clamped into [0.5, 0.85] confidence.
 * Returns { field, confidence, reason }, all null when nothing qualifies.
 *
 * @param {{ rawToken: string,
 *           resolveExactField: (raw: string) => string | null,
 *           keywordRules: { pattern: RegExp, field: string, confidence: number, reason: string }[],
 *           candidateFields: { name: string, label?: string, aliases?: string[] }[],
 *           canonicalizeFieldName?: (s: string) => string,
 *           minimumScore?: number }} input
 */
function suggestFieldByLexicalRules(input) {
  const exact = input.resolveExactField(input.rawToken);
  if (exact) {
    return {
      field: exact,
      confidence: 1,
      reason: "Correspondance exacte"
    };
  }
  const normalizedText = normalizeFreeText(input.rawToken);
  if (!normalizedText) {
    return { field: null, confidence: null, reason: null };
  }
  for (const rule of input.keywordRules) {
    // Bug fix: .test() on a /g or /y regex is stateful (lastIndex advances),
    // so repeated evaluations could miss matches. Reset before each use.
    rule.pattern.lastIndex = 0;
    if (rule.pattern.test(normalizedText)) {
      return { field: rule.field, confidence: rule.confidence, reason: rule.reason };
    }
  }
  const rawTokens = new Set(tokenize(normalizedText));
  let bestField = null;
  let bestScore = 0;
  for (const field of input.candidateFields) {
    // A field is scored by its best-matching label among name/label/aliases.
    const candidates = [field.name, field.label ?? "", ...field.aliases ?? []];
    const candidateScore = candidates.reduce((best, candidate) => {
      const normalizedCandidate = normalizeFreeText(candidate);
      const score = lexicalSimilarityScore(
        normalizedText,
        rawTokens,
        normalizedCandidate,
        input.canonicalizeFieldName
      );
      return Math.max(best, score);
    }, 0);
    if (candidateScore > bestScore) {
      bestScore = candidateScore;
      bestField = field.name;
    }
  }
  const minimumScore = input.minimumScore ?? 0.45;
  if (!bestField || bestScore < minimumScore) {
    return { field: null, confidence: null, reason: null };
  }
  // Similarity suggestions are deliberately capped below exact/rule matches.
  const confidence = Math.min(0.85, Math.max(0.5, bestScore + 0.15));
  return {
    field: bestField,
    confidence: Number(confidence.toFixed(2)),
    reason: "Similarit\xE9 lexicale"
  };
}
117
// Public API of @bhanquier/template-core (ESM build); mirrors the
// CommonJS build's export set.
export {
  computeRequiredCoverage,
  lexicalSimilarityScore,
  mergeTokens,
  normalizeFreeText,
  normalizeTokenValue,
  suggestFieldByLexicalRules,
  tokenKey,
  tokenize
};
package/package.json ADDED
@@ -0,0 +1,40 @@
1
+ {
2
+ "name": "@bhanquier/template-core",
3
+ "version": "0.1.1",
4
+ "description": "Canonical types and pure utilities for templating engines",
5
+ "type": "module",
6
+ "sideEffects": false,
7
+ "main": "./dist/index.js",
8
+ "module": "./dist/index.js",
9
+ "types": "./dist/index.d.ts",
10
+ "exports": {
11
+ ".": {
12
+ "types": "./dist/index.d.ts",
13
+ "import": "./dist/index.js",
14
+ "require": "./dist/index.cjs"
15
+ }
16
+ },
17
+ "files": [
18
+ "dist"
19
+ ],
20
+ "publishConfig": {
21
+ "access": "public"
22
+ },
23
+ "repository": {
24
+ "type": "git",
25
+ "url": "https://github.com/TrustAkt/trustakt-templating.git"
26
+ },
27
+ "scripts": {
28
+ "build": "tsup src/index.ts --format esm,cjs --out-dir dist --clean && tsc --noEmit",
29
+ "dev": "tsup src/index.ts --format esm,cjs --out-dir dist --watch",
30
+ "test": "vitest run",
31
+ "typecheck": "tsc --noEmit"
32
+ },
33
+ "devDependencies": {
34
+ "@types/node": "^20.10.0",
35
+ "tsup": "^8.5.1",
36
+ "typescript": "^5.3.0",
37
+ "vitest": "^4.0.15"
38
+ },
39
+ "license": "UNLICENSED"
40
+ }