@tricoteuses/senat 2.15.7 → 2.16.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/model/agenda.js +9 -16
- package/lib/model/commission.d.ts +9 -1
- package/lib/model/commission.js +47 -33
- package/lib/model/seance.js +1 -6
- package/lib/model/util.d.ts +3 -0
- package/lib/model/util.js +32 -0
- package/lib/scripts/retrieve_cr_commission.js +90 -72
- package/lib/scripts/retrieve_videos.d.ts +0 -2
- package/lib/scripts/retrieve_videos.js +57 -33
- package/lib/types/agenda.d.ts +2 -2
- package/lib/utils/cr_spliting.js +4 -2
- package/lib/utils/reunion_grouping.d.ts +1 -1
- package/lib/utils/reunion_grouping.js +13 -42
- package/package.json +1 -1
- package/lib/model/compte_rendu.d.ts +0 -9
- package/lib/model/compte_rendu.js +0 -325
- package/lib/raw_types/db.d.ts +0 -11389
- package/lib/raw_types/db.js +0 -5
- package/lib/scripts/retrieve_comptes_rendus.d.ts +0 -6
- package/lib/scripts/retrieve_comptes_rendus.js +0 -274
package/lib/raw_types/db.js
DELETED

package/lib/scripts/retrieve_comptes_rendus.d.ts
DELETED

@@ -1,6 +0,0 @@
-/**
- * Needs to be run after retrieve_agenda.ts !
- * - downloads the ZIP of comptes-rendus des débats (CRI) from data.senat.fr
- * - extracts XML files, distributes them by session/year
- */
-export declare function retrieveCriXmlDump(dataDir: string, options?: Record<string, any>): Promise<void>;

package/lib/scripts/retrieve_comptes_rendus.js
DELETED

@@ -1,274 +0,0 @@
-/**
- * Needs to be run after retrieve_agenda.ts !
- * - downloads the ZIP of comptes-rendus des débats (CRI) from data.senat.fr
- * - extracts XML files, distributes them by session/year
- */
-import assert from "assert";
-import commandLineArgs from "command-line-args";
-import fs from "fs-extra";
-import path from "path";
-import StreamZip from "node-stream-zip";
-import * as cheerio from "cheerio";
-import { AGENDA_FOLDER, COMPTES_RENDUS_FOLDER, DATA_ORIGINAL_FOLDER, DATA_TRANSFORMED_FOLDER, } from "../loaders";
-import { commonOptions } from "./shared/cli_helpers";
-import { deriveTitreObjetFromSommaire, parseCompteRenduSlotFromFile, parseYYYYMMDD, sessionStartYearFromDate } from "../model/compte_rendu";
-import { makeGroupUid } from "../utils/reunion_grouping";
-import { getSessionsFromStart } from "../types/sessions";
-import { ensureAndClearDir, fetchWithRetry } from "./shared/util";
-import { computeIntervalsBySlot } from "../utils/cr_spliting";
-const optionsDefinitions = [
-    ...commonOptions,
-    {
-        help: "parse and convert comptes-rendus des débats into JSON",
-        name: "parseDebats",
-        type: Boolean,
-    }
-];
-const options = commandLineArgs(optionsDefinitions);
-const CRI_ZIP_URL = "https://data.senat.fr/data/debats/cri.zip";
-const SLOT_ORDER = ["MATIN", "APRES-MIDI", "SOIR"];
-class CompteRenduError extends Error {
-    constructor(message, url) {
-        super(`An error occurred while retrieving ${url}: ${message}`);
-    }
-}
-function pickFirstSlotOfDay(slots) {
-    for (const s of SLOT_ORDER)
-        if (slots.includes(s))
-            return s;
-    return null;
-}
-function loadAgendaSPSlotsForDate(dataDir, yyyymmdd, session) {
-    const dirPath = path.join(dataDir, AGENDA_FOLDER, DATA_TRANSFORMED_FOLDER, session.toString());
-    if (!fs.existsSync(dirPath)) {
-        console.warn(`[AGENDA] Directory not found for session ${session} → ${dirPath}`);
-        return null;
-    }
-    const pattern = new RegExp(`^RUSN${yyyymmdd}IDS-(MATIN|APRES-MIDI|SOIR)\\.json$`);
-    const ALLOWED_SLOTS = new Set(["MATIN", "APRES-MIDI", "SOIR"]);
-    try {
-        const files = fs.readdirSync(dirPath);
-        const matched = files.filter((f) => pattern.test(f));
-        if (matched.length === 0) {
-            return null;
-        }
-        const found = new Set();
-        for (const name of matched) {
-            const m = name.match(pattern);
-            const raw = (m?.[1] ?? "");
-            if (ALLOWED_SLOTS.has(raw))
-                found.add(raw);
-        }
-        const slots = Array.from(found);
-        if (slots.length === 0) {
-            return null;
-        }
-        return { filePath: dirPath, slots };
-    }
-    catch {
-        return null;
-    }
-}
-async function downloadCriZip(zipPath) {
-    if (!options["silent"])
-        console.log(`Downloading CRI zip ${CRI_ZIP_URL}…`);
-    const response = await fetchWithRetry(CRI_ZIP_URL);
-    if (!response.ok) {
-        if (response.status === 404) {
-            console.warn(`CRI zip ${CRI_ZIP_URL} not found`);
-            return;
-        }
-        throw new CompteRenduError(String(response.status), CRI_ZIP_URL);
-    }
-    const buf = Buffer.from(await response.arrayBuffer());
-    await fs.writeFile(zipPath, buf);
-    if (!options["silent"]) {
-        const mb = (buf.length / (1024 * 1024)).toFixed(1);
-        console.log(`[CRI] Downloaded ${mb} MB → ${zipPath}`);
-    }
-}
-async function extractAndDistributeXmlBySession(zipPath, originalRoot) {
-    const zip = new StreamZip.async({ file: zipPath });
-    const entries = await zip.entries();
-    let count = 0;
-    for (const entryName of Object.keys(entries)) {
-        if (!entryName.toLowerCase().endsWith(".xml"))
-            continue;
-        // ex: d20231005.xml
-        const base = path.basename(entryName);
-        const m = base.match(/^d(\d{8})\.xml$/i);
-        if (!m)
-            continue;
-        const yyyymmdd = m[1];
-        const dt = parseYYYYMMDD(yyyymmdd);
-        if (!dt)
-            continue;
-        const session = sessionStartYearFromDate(dt);
-        const destDir = path.join(originalRoot, String(session));
-        await fs.ensureDir(destDir);
-        const outPath = path.join(destDir, base);
-        await zip.extract(entryName, outPath);
-        count++;
-    }
-    await zip.close();
-    return count;
-}
-export async function retrieveCriXmlDump(dataDir, options = {}) {
-    const root = path.join(dataDir, COMPTES_RENDUS_FOLDER);
-    ensureAndClearDir(root);
-    const originalRoot = path.join(root, DATA_ORIGINAL_FOLDER);
-    fs.ensureDirSync(originalRoot);
-    const transformedRoot = path.join(root, DATA_TRANSFORMED_FOLDER);
-    if (options["parseDebats"])
-        fs.ensureDirSync(transformedRoot);
-    const sessions = getSessionsFromStart(options["fromSession"]);
-    // 1) Download ZIP global + distribut by session
-    const zipPath = path.join(dataDir, "cri.zip");
-    console.log("[CRI] Downloading global CRI zip…");
-    await downloadCriZip(zipPath);
-    console.log("[CRI] Extracting + distributing XMLs by session…");
-    for (const session of sessions) {
-        const dir = path.join(originalRoot, String(session));
-        if (await fs.pathExists(dir)) {
-            for (const f of await fs.readdir(dir))
-                if (/\.xml$/i.test(f))
-                    await fs.remove(path.join(dir, f));
-        }
-    }
-    const n = await extractAndDistributeXmlBySession(zipPath, originalRoot);
-    if (n === 0) {
-        console.warn("[CRI] No XML extracted. Archive empty or layout changed?");
-    }
-    else {
-        console.log(`[CRI] Distributed ${n} XML file(s) into session folders.`);
-    }
-    if (!options["parseDebats"]) {
-        console.log("[CRI] parseDebats not requested → done.");
-        return;
-    }
-    for (const session of sessions) {
-        const originalSessionDir = path.join(originalRoot, String(session));
-        if (!(await fs.pathExists(originalSessionDir))) {
-            continue;
-        }
-        const xmlFiles = (await fs.readdir(originalSessionDir))
-            .filter((f) => /^d\d{8}\.xml$/i.test(f))
-            .sort();
-        const transformedSessionDir = path.join(transformedRoot, String(session));
-        if (options["parseDebats"])
-            await fs.ensureDir(transformedSessionDir);
-        for (const f of xmlFiles) {
-            const yyyymmdd = f.slice(1, 9);
-            const xmlPath = path.join(originalSessionDir, f);
-            // 1) Deduce slot(s) from agenda if it exsits
-            const agendaInfo = loadAgendaSPSlotsForDate(dataDir, yyyymmdd, session);
-            const firstSlotOfDay = pickFirstSlotOfDay(agendaInfo?.slots ?? []);
-            // 2) Detect slots from CRI content
-            let slotsInCri = [];
-            try {
-                const raw = await fs.readFile(xmlPath, "utf8");
-                const $ = cheerio.load(raw, { xml: false });
-                const order = $("body *").toArray();
-                const idx = new Map(order.map((el, i) => [el, i]));
-                const intervals = computeIntervalsBySlot($, idx, firstSlotOfDay ?? undefined);
-                const uniq = new Set();
-                for (const iv of intervals)
-                    if (iv.slot && iv.slot !== "UNKNOWN")
-                        uniq.add(iv.slot);
-                slotsInCri = Array.from(uniq);
-            }
-            catch (e) {
-                console.warn(`[CRI] [${session}] Cannot read/parse ${f}:`, e);
-                continue;
-            }
-            if (slotsInCri.length === 0) {
-                slotsInCri = [firstSlotOfDay ?? "MATIN"];
-            }
-            // 3) Parse & write each slot
-            for (const slot of slotsInCri) {
-                const outName = `CRSSN${yyyymmdd}-${slot}.json`;
-                const cr = await parseCompteRenduSlotFromFile(xmlPath, slot, firstSlotOfDay ?? slot);
-                if (!cr) {
-                    console.warn(`[CRI] [${session}] Empty or no points for ${yyyymmdd} (${slot}) → skip`);
-                    continue;
-                }
-                const outDir = transformedSessionDir;
-                await fs.ensureDir(outDir);
-                const outPath = path.join(outDir, outName);
-                await fs.writeJSON(outPath, cr, { spaces: 2 });
-                try {
-                    await linkCriSlotIntoAgendaGrouped(dataDir, yyyymmdd, slot, cr.uid, cr, session);
-                }
-                catch (e) {
-                    console.warn(`[AGENDA] [${session}] Could not link CR into grouped for ${yyyymmdd} ${slot}:`, e);
-                }
-            }
-        }
-    }
-}
-async function main() {
-    const dataDir = options["dataDir"];
-    assert(dataDir, "Missing argument: data directory");
-    console.time("CRI processing time");
-    await retrieveCriXmlDump(dataDir, options);
-    if (!options["silent"]) {
-        console.timeEnd("CRI processing time");
-    }
-}
-main()
-    .then(() => process.exit(0))
-    .catch((error) => {
-    console.error(error);
-    process.exit(1);
-});
-async function linkCriSlotIntoAgendaGrouped(dataDir, yyyymmdd, slot, crUid, cr, session) {
-    const groupedDir = path.join(dataDir, AGENDA_FOLDER, DATA_TRANSFORMED_FOLDER, session.toString());
-    fs.ensureDirSync(groupedDir);
-    const groupedPath = path.join(groupedDir, 'RUSN' + yyyymmdd + 'IDS-' + slot + '.json');
-    let groups = [];
-    if (fs.existsSync(groupedPath)) {
-        try {
-            groups = JSON.parse(fs.readFileSync(groupedPath, "utf8"));
-            if (!Array.isArray(groups))
-                groups = [];
-        }
-        catch (e) {
-            console.warn(`[AGENDA] unreadable grouped JSON → ${groupedPath} (${e}) → recreating`);
-            groups = [];
-        }
-    }
-    // find existing group with same slot
-    const sameSlot = groups.filter(g => g?.slot === slot);
-    let target = null;
-    if (sameSlot.length > 1) {
-        console.warn(`[AGENDA] multiple groups for ${yyyymmdd} ${slot} in ${groupedPath} → linking the first`);
-    }
-    target = sameSlot[0] ?? null;
-    const dateISO = `${yyyymmdd.slice(0, 4)}-${yyyymmdd.slice(4, 6)}-${yyyymmdd.slice(6, 8)}`;
-    const sommaire = cr?.metadonnees?.sommaire;
-    const { titre: dTitre, objet: dObjet } = deriveTitreObjetFromSommaire(sommaire, slot);
-    if (!target) {
-        const newGroup = {
-            uid: makeGroupUid(dateISO, slot),
-            chambre: "SN",
-            date: dateISO,
-            slot,
-            type: "Séance publique",
-            startTime: null,
-            endTime: null,
-            captationVideo: false,
-            titre: dTitre,
-            objet: dObjet || "",
-            events: [],
-            compteRenduRefUid: crUid,
-        };
-        groups.push(newGroup);
-    }
-    else {
-        target.compteRenduRefUid = crUid;
-    }
-    await fs.writeJSON(groupedPath, groups, { spaces: 2 });
-    if (!options["silent"]) {
-        console.log(`[AGENDA] Linked CR ${crUid} → ${path.basename(groupedPath)} [${slot}]`);
-    }
-}