euparliamentmonitor 0.9.10 → 0.9.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -2
- package/package.json +7 -6
- package/scripts/cache-thresholds.js +305 -0
- package/scripts/extend-artifacts.js +369 -0
- package/scripts/imf-fallback-ladder.js +628 -0
- package/scripts/lint-prompts.js +33 -0
- package/scripts/mcp/ep-mcp-client.d.ts +73 -20
- package/scripts/mcp/ep-mcp-client.js +165 -36
- package/scripts/mcp/wb-mcp-client.d.ts +20 -0
- package/scripts/mcp/wb-mcp-client.js +130 -0
- package/scripts/scrape-doceo-votes.js +555 -0
- package/scripts/utils/mcp-probe.d.ts +50 -0
- package/scripts/utils/mcp-probe.js +273 -0
- package/scripts/validate-analysis-completeness.js +2 -0
package/README.md
CHANGED
|
@@ -136,7 +136,7 @@ The published site is the audience-facing companion to this npm/TypeScript packa
|
|
|
136
136
|
|
|
137
137
|
**MCP Server Integration**: The project uses the
|
|
138
138
|
[European-Parliament-MCP-Server](https://github.com/Hack23/European-Parliament-MCP-Server)
|
|
139
|
-
v1.3.
|
|
139
|
+
v1.3.6 for accessing real EU Parliament data via the Model Context Protocol.
|
|
140
140
|
|
|
141
141
|
- **MCP Server Status**: ✅ Fully operational — 60+ EP data tools available
|
|
142
142
|
(feeds, direct lookups, analytical tools, intelligence correlation)
|
|
@@ -432,7 +432,7 @@ import type { ArticleCategory, LanguageCode } from 'euparliamentmonitor/types';
|
|
|
432
432
|
|
|
433
433
|
## 🔌 Data Sources
|
|
434
434
|
|
|
435
|
-
**Primary — European Parliament MCP Server** ([Hack23/European-Parliament-MCP-Server](https://github.com/Hack23/European-Parliament-MCP-Server) v1.3.
|
|
435
|
+
**Primary — European Parliament MCP Server** ([Hack23/European-Parliament-MCP-Server](https://github.com/Hack23/European-Parliament-MCP-Server) v1.3.6+, fully operational):
|
|
436
436
|
|
|
437
437
|
- 🗳️ Plenary sessions, voting records, roll-call votes
|
|
438
438
|
- 📜 Adopted texts, motions, resolutions, urgency files
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "euparliamentmonitor",
|
|
3
|
-
"version": "0.9.
|
|
3
|
+
"version": "0.9.12",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"description": "European Parliament Intelligence Platform - Monitor political activity with systematic transparency",
|
|
6
6
|
"main": "scripts/index.js",
|
|
@@ -74,6 +74,7 @@
|
|
|
74
74
|
"optimize-css": "node scripts/optimize-css.js",
|
|
75
75
|
"minify-assets": "node scripts/minify-assets.js",
|
|
76
76
|
"validate-ep-api": "npx tsx src/utils/validate-ep-api.ts",
|
|
77
|
+
"mcp:probe": "npx tsx src/utils/mcp-probe.ts",
|
|
77
78
|
"lint:prompts": "node scripts/lint-prompts.js",
|
|
78
79
|
"htmlhint": "sh -c 'htmlhint *.html; set -- news/*.html; if [ -e \"$1\" ]; then htmlhint \"$@\"; else echo \"No news/*.html files to lint\"; fi'",
|
|
79
80
|
"serve": "python3 -m http.server 8080",
|
|
@@ -149,7 +150,7 @@
|
|
|
149
150
|
"@playwright/test": "1.60.0",
|
|
150
151
|
"@types/d3": "7.4.3",
|
|
151
152
|
"@types/markdown-it": "^14.1.2",
|
|
152
|
-
"@types/node": "25.
|
|
153
|
+
"@types/node": "25.8.0",
|
|
153
154
|
"@types/papaparse": "5.5.2",
|
|
154
155
|
"@typescript-eslint/eslint-plugin": "8.59.3",
|
|
155
156
|
"@typescript-eslint/parser": "8.59.3",
|
|
@@ -159,7 +160,7 @@
|
|
|
159
160
|
"chartjs-plugin-annotation": "3.1.0",
|
|
160
161
|
"clean-css": "^5.3.3",
|
|
161
162
|
"d3": "7.9.0",
|
|
162
|
-
"eslint": "10.
|
|
163
|
+
"eslint": "10.4.0",
|
|
163
164
|
"eslint-config-prettier": "10.1.8",
|
|
164
165
|
"eslint-plugin-jsdoc": "62.9.0",
|
|
165
166
|
"eslint-plugin-security": "4.0.0",
|
|
@@ -168,7 +169,7 @@
|
|
|
168
169
|
"html-minifier-terser": "^7.2.0",
|
|
169
170
|
"htmlhint": "1.9.2",
|
|
170
171
|
"husky": "9.1.7",
|
|
171
|
-
"jscpd": "4.
|
|
172
|
+
"jscpd": "4.2.1",
|
|
172
173
|
"knip": "^6.7.0",
|
|
173
174
|
"lint-staged": "17.0.4",
|
|
174
175
|
"mermaid": "11.15.0",
|
|
@@ -178,7 +179,7 @@
|
|
|
178
179
|
"sharp": "^0.34.5",
|
|
179
180
|
"terser": "^5.47.1",
|
|
180
181
|
"ts-api-utils": "2.5.0",
|
|
181
|
-
"tsx": "4.
|
|
182
|
+
"tsx": "4.22.0",
|
|
182
183
|
"typedoc": "0.28.19",
|
|
183
184
|
"typescript": "6.0.3",
|
|
184
185
|
"vitest": "4.1.6"
|
|
@@ -187,7 +188,7 @@
|
|
|
187
188
|
"node": ">=26"
|
|
188
189
|
},
|
|
189
190
|
"dependencies": {
|
|
190
|
-
"european-parliament-mcp-server": "1.3.
|
|
191
|
+
"european-parliament-mcp-server": "1.3.6",
|
|
191
192
|
"markdown-it": "^14.1.1",
|
|
192
193
|
"markdown-it-anchor": "^9.2.0",
|
|
193
194
|
"markdown-it-attrs": "^4.3.1",
|
|
@@ -0,0 +1,305 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
// SPDX-FileCopyrightText: 2024-2026 Hack23 AB
|
|
3
|
+
// SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* @module scripts/cache-thresholds
|
|
7
|
+
* @description Thresholds cache helper for Stage B analysis runs.
|
|
8
|
+
*
|
|
9
|
+
* The `analysis/methodologies/reference-quality-thresholds.json` file is large
|
|
10
|
+
* (~1 000 lines). Without this helper, each artifact's Stage B write cycle
|
|
11
|
+
* causes the agent to re-read the entire thresholds file to look up the floor
|
|
12
|
+
* for a single relativePath. With 38+ artifacts per run, that amounts to 38+
|
|
13
|
+
* wasted LLM invocations.
|
|
14
|
+
*
|
|
15
|
+
* This script:
|
|
16
|
+
* 1. Reads the full `reference-quality-thresholds.json`
|
|
17
|
+
* 2. Filters to the subset relevant to the given article-type slug
|
|
18
|
+
* 3. Writes a compact `thresholds-cache.json` under the run directory
|
|
19
|
+
* 4. Uses a content hash for the cache key so subsequent calls with identical
|
|
20
|
+
* inputs short-circuit without re-writing the file
|
|
21
|
+
*
|
|
22
|
+
* The output file is a flat map: `{ [relativePath]: number }` where the value
|
|
23
|
+
* is the per-artifact line floor for that slug, plus a small set of metadata
|
|
24
|
+
* fields.
|
|
25
|
+
*
|
|
26
|
+
* Invocation (Stage B start — called once per run):
|
|
27
|
+
* node scripts/cache-thresholds.js \
|
|
28
|
+
* --slug breaking \
|
|
29
|
+
* --run-id breaking-run-1234 \
|
|
30
|
+
* [--analysis-dir analysis/daily/2026-05-14/breaking] \
|
|
31
|
+
* [--repo-root /path/to/repo]
|
|
32
|
+
*
|
|
33
|
+
* Exports (for unit testing):
|
|
34
|
+
* loadThresholds(repoRoot)
|
|
35
|
+
* filterThresholdsForSlug(thresholds, slug)
|
|
36
|
+
* buildThresholdsCache(repoRoot, slug, runId, runDir)
|
|
37
|
+
*/
|
|
38
|
+
|
|
39
|
+
import crypto from 'node:crypto';
|
|
40
|
+
import fs from 'node:fs';
|
|
41
|
+
import path from 'node:path';
|
|
42
|
+
import process from 'node:process';
|
|
43
|
+
import { fileURLToPath } from 'node:url';
|
|
44
|
+
|
|
45
|
+
/** Path, relative to the repo root, of the canonical thresholds JSON read by loadThresholds(). */
const THRESHOLDS_REL_PATH = 'analysis/methodologies/reference-quality-thresholds.json';

/** Fallback per-artifact line floor applied when the thresholds JSON declares no `defaults.minLines`. */
const DEFAULT_MIN_LINES = 30;

/** Name of the slug-filtered cache file written into the run directory by buildThresholdsCache(). */
const CACHE_FILENAME = 'thresholds-cache.json';
|
|
53
|
+
|
|
54
|
+
// ---------------------------------------------------------------------------
|
|
55
|
+
// Article-type slug → thresholds key normalisation
|
|
56
|
+
// ---------------------------------------------------------------------------
|
|
57
|
+
|
|
58
|
+
/**
 * Reduce an article-type slug to its canonical thresholds key by trimming
 * any trailing run identifier.
 *
 * Both suffix spellings are recognised:
 * - `-run-<digits>` (e.g. `breaking-run-1234`) — newer workflow runs
 * - `-run<digits>`  (e.g. `breaking-run184`)  — legacy folder names
 * Either may carry an extra `-<digits>` sub-run segment used by
 * long-horizon workflows (e.g. `breaking-run-123-456`).
 *
 * @param {string} slug - Raw slug value
 * @returns {string} Canonical article-type key
 */
export function normaliseSlug(slug) {
  // Trailing run identifier, with optional sub-run segment.
  const runSuffixPattern = /-run-?\d+(-\d+)?$/;
  return slug.replace(runSuffixPattern, '');
}
|
|
74
|
+
|
|
75
|
+
// ---------------------------------------------------------------------------
|
|
76
|
+
// Thresholds file loader
|
|
77
|
+
// ---------------------------------------------------------------------------
|
|
78
|
+
|
|
79
|
+
/**
 * Load the full `reference-quality-thresholds.json` from the repo root.
 *
 * Reads the file directly and maps a missing-file error to a descriptive
 * message instead of pre-checking with `existsSync` (which would be a
 * check-then-read race). A JSON parse failure is wrapped so the error
 * names the offending file, with the original error preserved as `cause`.
 *
 * @param {string} repoRoot - Absolute path to the repository root
 * @returns {{ thresholds: object, raw: string }} Parsed JSON plus the raw
 *   file text (callers hash the raw text for cache keys)
 * @throws {Error} If the file is missing or not valid JSON
 */
export function loadThresholds(repoRoot) {
  const filePath = path.join(repoRoot, THRESHOLDS_REL_PATH);

  let raw;
  try {
    raw = fs.readFileSync(filePath, 'utf8');
  } catch (err) {
    if (err && err.code === 'ENOENT') {
      throw new Error(`Thresholds file not found: ${filePath}`);
    }
    throw err; // Permission/IO errors propagate unchanged.
  }

  try {
    return { thresholds: JSON.parse(raw), raw };
  } catch (err) {
    throw new Error(`Thresholds file is not valid JSON: ${filePath}`, { cause: err });
  }
}
|
|
97
|
+
|
|
98
|
+
// ---------------------------------------------------------------------------
|
|
99
|
+
// Slug filter
|
|
100
|
+
// ---------------------------------------------------------------------------
|
|
101
|
+
|
|
102
|
+
/**
 * Select the threshold entries that apply to a single article-type slug.
 *
 * The thresholds JSON carries a `thresholds` object keyed by article type;
 * each type holds a `files` map of `relativePath → number` (a bare map is
 * also accepted when no `files` wrapper is present). An article type
 * matches when it equals the slug directly or after run-suffix
 * normalisation. Paths present in several matched types keep the highest
 * floor seen.
 *
 * @param {object} thresholds - Parsed thresholds JSON root object
 * @param {string} slug - Article-type slug (already normalised)
 * @returns {{ floors: Record<string, number>, matchedTypes: string[], defaultFloor: number }}
 */
export function filterThresholdsForSlug(thresholds, slug) {
  const thresholdsMap = thresholds?.thresholds ?? {};
  const defaultFloor = thresholds?.defaults?.minLines ?? DEFAULT_MIN_LINES;

  const floors = {};
  const matchedTypes = [];

  for (const [articleType, typeConfig] of Object.entries(thresholdsMap)) {
    // Accept exact matches and run-suffixed variants of the slug.
    const matches = articleType === slug || normaliseSlug(articleType) === slug;
    if (!matches) continue;

    matchedTypes.push(articleType);

    const filesMap = typeConfig?.files ?? typeConfig ?? {};
    for (const [relPath, floor] of Object.entries(filesMap)) {
      if (typeof floor !== 'number') continue;
      // Keep the largest floor when a path appears in multiple types.
      const previous = floors[relPath] ?? 0;
      floors[relPath] = floor > previous ? floor : previous;
    }
  }

  return { floors, matchedTypes, defaultFloor };
}
|
|
142
|
+
|
|
143
|
+
// ---------------------------------------------------------------------------
|
|
144
|
+
// Cache builder
|
|
145
|
+
// ---------------------------------------------------------------------------
|
|
146
|
+
|
|
147
|
+
/**
 * Build and write a slug-filtered thresholds cache file.
 *
 * When a cache file already exists with the same source-content hash and
 * slug, nothing is rewritten and `{ cached: true }` is reported.
 *
 * @param {string} repoRoot - Absolute path to the repository root
 * @param {string} slug - Article-type slug
 * @param {string} runId - Run identifier (used for the output path)
 * @param {string} [runDir] - Override for the run directory (defaults to
 *   `analysis/runs/<runId>` relative to repoRoot)
 * @returns {{
 *   outputFile: string,
 *   floors: Record<string, number>,
 *   matchedTypes: string[],
 *   defaultFloor: number,
 *   contentHash: string,
 *   cached: boolean
 * }}
 */
export function buildThresholdsCache(repoRoot, slug, runId, runDir) {
  const canonicalSlug = normaliseSlug(slug);
  const { thresholds, raw } = loadThresholds(repoRoot);

  // Hash the *source* thresholds text (not the filtered output); the first
  // 16 hex chars are enough to key the cache.
  const contentHash = crypto
    .createHash('sha256')
    .update(raw)
    .digest('hex')
    .slice(0, 16);

  const { floors, matchedTypes, defaultFloor } = filterThresholdsForSlug(
    thresholds,
    canonicalSlug,
  );

  const targetDir = runDir || path.join(repoRoot, 'analysis', 'runs', runId);
  fs.mkdirSync(targetDir, { recursive: true });
  const outputFile = path.join(targetDir, CACHE_FILENAME);

  // Reuse an existing cache whose hash and slug both match.
  if (fs.existsSync(outputFile)) {
    try {
      const previous = JSON.parse(fs.readFileSync(outputFile, 'utf8'));
      if (previous.contentHash === contentHash && previous.slug === canonicalSlug) {
        return { outputFile, floors, matchedTypes, defaultFloor, contentHash, cached: true };
      }
    } catch {
      // Corrupt or unreadable cache — fall through and rewrite it.
    }
  }

  const payload = {
    generatedAt: new Date().toISOString(),
    slug: canonicalSlug,
    runId,
    contentHash,
    defaultFloor,
    matchedTypes,
    artifactCount: Object.keys(floors).length,
    floors,
  };
  fs.writeFileSync(outputFile, JSON.stringify(payload, null, 2), 'utf8');

  return { outputFile, floors, matchedTypes, defaultFloor, contentHash, cached: false };
}
|
|
214
|
+
|
|
215
|
+
// ---------------------------------------------------------------------------
|
|
216
|
+
// CLI entry point
|
|
217
|
+
// ---------------------------------------------------------------------------
|
|
218
|
+
|
|
219
|
+
/**
 * Parse minimalist `--key value` CLI args.
 *
 * A `--key` followed by another `--flag` (or by nothing) is recorded as a
 * boolean `true`; otherwise the next token becomes its string value.
 * Tokens that do not start with `--` are ignored.
 *
 * @param {string[]} argv - Raw argument vector (without node/script)
 * @returns {Record<string, string|boolean>} Parsed key/value map
 */
/* c8 ignore start */
function parseArgs(argv) {
  const parsed = {};
  for (let idx = 0; idx < argv.length; idx += 1) {
    const token = argv[idx];
    if (!token.startsWith('--')) continue;
    const key = token.slice(2);
    const value = argv[idx + 1];
    if (value === undefined || value.startsWith('--')) {
      // Bare flag: no value token follows.
      parsed[key] = true;
    } else {
      parsed[key] = value;
      idx += 1; // Consume the value token as well.
    }
  }
  return parsed;
}
|
|
247
|
+
|
|
248
|
+
/**
 * CLI main entry point: validates arguments, builds the thresholds cache,
 * and prints a one-line JSON summary to stdout.
 *
 * Exits with code 2 when required arguments are missing and code 1 when
 * the cache build fails.
 *
 * @param {string[]} [argv] - Argument vector (defaults to process.argv.slice(2))
 * @returns {void}
 */
export function main(argv = process.argv.slice(2)) {
  const args = parseArgs(argv);

  if (!args.slug || !args['run-id']) {
    process.stderr.write(
      'Usage: node scripts/cache-thresholds.js --slug <slug> --run-id <run-id>' +
        ' [--analysis-dir <dir>] [--repo-root <path>]\n',
    );
    process.exit(2);
  }

  const slug = String(args.slug);
  const runId = String(args['run-id']);
  const repoRoot = args['repo-root'] ? String(args['repo-root']) : process.cwd();
  // With --analysis-dir, the run directory lives under <analysis-dir>/runs/<runId>.
  const runDir = args['analysis-dir']
    ? path.join(String(args['analysis-dir']), 'runs', runId)
    : undefined;

  try {
    const result = buildThresholdsCache(repoRoot, slug, runId, runDir);
    const summary = {
      status: 'ok',
      outputFile: result.outputFile,
      slug,
      matchedTypes: result.matchedTypes,
      artifactCount: Object.keys(result.floors).length,
      defaultFloor: result.defaultFloor,
      contentHash: result.contentHash,
      cached: result.cached,
    };
    process.stdout.write(`${JSON.stringify(summary)}\n`);
  } catch (err) {
    process.stderr.write(`Error: ${err}\n`);
    process.exit(1);
  }
}
|
|
294
|
+
|
|
295
|
+
// Standard ESM CLI guard: run main() only when this file is the entry
// script. The fallback compares path.basename() rather than
// endsWith('/cache-thresholds.js') so it also matches Windows-style
// backslash paths in process.argv[1].
const isMain =
  typeof process !== 'undefined' &&
  process.argv[1] !== undefined &&
  (process.argv[1] === fileURLToPath(import.meta.url) ||
    path.basename(process.argv[1]) === 'cache-thresholds.js');

if (isMain) {
  main();
}
/* c8 ignore stop */
|