eba-xbridge 1.5.0rc2-py3-none-any.whl → 1.5.0rc3-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in the supported public registry. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: eba-xbridge
- Version: 1.5.0rc2
+ Version: 1.5.0rc3
  Summary: XBRL-XML to XBRL-CSV converter for EBA Taxonomy (version 4.1)
  License: Apache 2.0
  License-File: LICENSE
@@ -1,7 +1,7 @@
- xbridge/__init__.py,sha256=BrHDgfv0XiuLA3wGkiWOTyi13tkIQKlHy2_r9kdC8mE,68
- xbridge/api.py,sha256=IhP-nMHxxw5RLQgKWi1-c5v8OXMRIWuSkS6G5RLmZII,1326
- xbridge/converter.py,sha256=X6ZFSyIiXFq_MKpCqtvK9Jno1A-umozs3gs_MiZx0ZQ,25992
- xbridge/instance.py,sha256=_Cjle0vt3cEfyWQeStLT9if0aDOio4185ig7YykVGNs,27984
+ xbridge/__init__.py,sha256=H66PeuXAnonjcj2yXO8Tis9cgDs-cTh3i3vXOYpDnFw,68
+ xbridge/api.py,sha256=NCBz7VRJWE3gID6ndgL4Awoxw0w1yMIIf_OTLRuZyyQ,1559
+ xbridge/converter.py,sha256=Xft2lc5R1MMDNftlKRj7NuLTGhDHej-T4xc-z_roIeM,24433
+ xbridge/instance.py,sha256=_UNB2j6ykHeOPb18TuDGw8qMq0ER-bV6FYTz7tSmDxc,29709
  xbridge/modules/ae_ae_4.2.json,sha256=AdFvwZqX0KVP3jF1iHeQc5QSnSMvvT3GvoA2G1AgXis,460165
  xbridge/modules/ae_con_cir-680-2014_2017-04-04.json,sha256=4n0t9dKJNU8Nb5QHpssrDs8ZLwzI-Mw75ax-ar9pLu0,363273
  xbridge/modules/ae_con_cir-680-2014_2018-03-31.json,sha256=aVWeLLs20p39kQQUthUzqrxBGKTycqhgX9WLk1rVlNw,363538
@@ -382,7 +382,7 @@ xbridge/modules/sepa_ipr_pay_4.2.json,sha256=JLJvR02LOAJy6SWPRuhV1TT02oXQhsG83FB
  xbridge/modules.py,sha256=8TheJY7oZIy_n-doALa_9AYwwZFu284jaBWt-aol0MA,22292
  xbridge/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  xbridge/taxonomy_loader.py,sha256=K0lnJVryvkKsaoK3fMis-L2JpmwLO6z3Ruq3yj9FxDY,9317
- eba_xbridge-1.5.0rc2.dist-info/METADATA,sha256=XwKBzNPYFZSqK_KtlWwzXlKeCfl90o4_79gsZucf0fs,2088
- eba_xbridge-1.5.0rc2.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
- eba_xbridge-1.5.0rc2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- eba_xbridge-1.5.0rc2.dist-info/RECORD,,
+ eba_xbridge-1.5.0rc3.dist-info/METADATA,sha256=ahBEnyB5K2mskMtLQw7LwqVUnRIPZY8TbzUZ3wd2XeA,2088
+ eba_xbridge-1.5.0rc3.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+ eba_xbridge-1.5.0rc3.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ eba_xbridge-1.5.0rc3.dist-info/RECORD,,
xbridge/__init__.py CHANGED
@@ -2,4 +2,4 @@
  Init file for eba-xbridge library
  """

- __version__ = "1.5.0rc2"
+ __version__ = "1.5.0rc3"
xbridge/api.py CHANGED
@@ -14,6 +14,7 @@ def convert_instance(
  output_path: Optional[Union[str, Path]] = None,
  headers_as_datapoints: bool = False,
  validate_filing_indicators: bool = True,
+ strict_validation: bool = True,
  ) -> Path:
  """
  Convert one single instance of XBRL-XML file to a CSV file
@@ -27,6 +28,9 @@ def convert_instance(
  :param validate_filing_indicators: If True, validate that no facts are orphaned
  (belong only to non-reported tables). Default is True.

+ :param strict_validation: If True (default), raise an error on orphaned facts. If False,
+ emit a warning instead and continue.
+
  :return: Converted CSV file.

  """
@@ -34,7 +38,12 @@ def convert_instance(
  output_path = Path(".")

  converter = Converter(instance_path)
- return converter.convert(output_path, headers_as_datapoints, validate_filing_indicators)
+ return converter.convert(
+ output_path,
+ headers_as_datapoints,
+ validate_filing_indicators,
+ strict_validation,
+ )


  def load_instance(instance_path: Union[str, Path]) -> Instance:
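The new strict_validation flag is passed straight through from convert_instance to the converter. A minimal usage sketch of the new parameter (the instance path and output directory below are placeholders, not files shipped with the package):

    from xbridge.api import convert_instance

    # Default behaviour: orphaned facts (facts that belong only to non-reported
    # tables) raise a ValueError during conversion.
    convert_instance("my_report.xbrl", output_path="out")

    # Lenient mode introduced in 1.5.0rc3: orphaned facts only trigger a warning,
    # are excluded from the output, and the conversion continues.
    convert_instance(
        "my_report.xbrl",
        output_path="out",
        validate_filing_indicators=True,
        strict_validation=False,
    )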
xbridge/converter.py CHANGED
@@ -6,10 +6,11 @@ from __future__ import annotations

  import csv
  import json
+ import warnings
  from pathlib import Path
  from shutil import rmtree
  from tempfile import TemporaryDirectory
- from typing import Any, Dict, Set, Union
+ from typing import Any, Dict, Union
  from zipfile import ZipFile

  import pandas as pd
@@ -76,6 +77,7 @@ class Converter:
  output_path: Union[str, Path],
  headers_as_datapoints: bool = False,
  validate_filing_indicators: bool = True,
+ strict_validation: bool = True,
  ) -> Path:
  """Convert the ``XML Instance`` to a CSV file or between CSV formats"""
  if not output_path:
@@ -90,7 +92,9 @@ class Converter:
  raise ValueError("Module of the instance file not found in the taxonomy")

  if isinstance(self.instance, XmlInstance):
- return self.convert_xml(output_path, headers_as_datapoints, validate_filing_indicators)
+ return self.convert_xml(
+ output_path, headers_as_datapoints, validate_filing_indicators, strict_validation
+ )
  elif isinstance(self.instance, CsvInstance):
  if self.module.architecture != "headers":
  raise ValueError("Cannot convert CSV instance with non-headers architecture")
@@ -103,6 +107,7 @@ class Converter:
  output_path: Path,
  headers_as_datapoints: bool = False,
  validate_filing_indicators: bool = True,
+ strict_validation: bool = True,
  ) -> Path:
  module_filind_codes = [table.filing_indicator_code for table in self.module.tables]

@@ -147,7 +152,7 @@ class Converter:
  self._convert_filing_indicator(report_dir)

  if validate_filing_indicators:
- self._validate_filing_indicators()
+ self._validate_filing_indicators(strict_validation=strict_validation)

  with open(MAPPING_PATH / self.module.dim_dom_file_name, "r", encoding="utf-8") as fl:
  mapping_dict: Dict[str, str] = json.load(fl)
@@ -280,111 +285,55 @@ class Converter:
  instance_df = instance_df.loc[mask]
  instance_df.drop(columns=nrd_list, inplace=True)

- return instance_df
-
- def _normalize_allowed_values(
- self, table_df: pd.DataFrame, datapoint_df: pd.DataFrame
- ) -> pd.DataFrame:
- """
- Normalizes fact values against allowed_values for each variable.
-
- For variables with allowed_values:
- 1. Extracts code part from fact values (after ":")
- 2. Maps to correct namespaced value from allowed_values
- 3. Updates dimension columns with normalized values
- 4. Validates no unmatched codes remain
-
- :param table_df: The merged dataframe with facts and variables
- :param datapoint_df: The dataframe with variable definitions including allowed_values
- :return: The normalized dataframe
- """
- if "allowed_values" not in datapoint_df.columns:
- return table_df
+ # Rows missing values for required open keys do not belong to the table
+ if open_keys:
+ instance_df.dropna(subset=list(open_keys), inplace=True)

- # Build mapping: datapoint → {code → full_value}
- datapoint_allowed_map: Dict[str, Dict[str, str]] = {}
+ return instance_df

- for _, row in datapoint_df.iterrows():
- datapoint = row.get("datapoint")
- allowed_values = row.get("allowed_values")
+ def _matching_fact_indices(self, table: Table) -> set[int]:
+ """Return indices of instance facts that actually match the table definition."""
+ if self.instance.instance_df is None:
+ return set()

- if not datapoint or not allowed_values or len(allowed_values) == 0:
- continue
+ instance_df = self._get_instance_df(table)
+ if instance_df.empty or table.variable_df is None:
+ return set()

- # Group allowed values by the dimension they apply to
- # For now, we'll apply them to all dimension columns
- # In the future, we could make this more sophisticated
- code_map: Dict[str, str] = {}
- for allowed_val in allowed_values:
- if ":" in allowed_val:
- code = allowed_val.split(":")[-1]
- code_map[code] = allowed_val
-
- if code_map:
- datapoint_allowed_map[datapoint] = code_map
-
- if not datapoint_allowed_map:
- return table_df
-
- # Identify columns to normalize
- # We normalize both dimension columns AND the value column (for enumerated values)
- exclude_cols = {"datapoint", "decimals", "unit", "data_type", "allowed_values"}
- columns_to_check = [col for col in table_df.columns if col not in exclude_cols]
-
- # For each column that might contain namespaced values
- for dim_col in columns_to_check:
- if dim_col not in table_df.columns or table_df[dim_col].isna().all():
- continue
+ open_keys = set(table.open_keys)

- # Check if column contains namespaced values (contains ":")
- sample_values = table_df[dim_col].dropna()
- if sample_values.empty:
- continue
+ datapoint_df = table.variable_df.copy()

- has_namespace = sample_values.astype(str).str.contains(":", regex=False).any()
- if not has_namespace:
- continue
+ # For validation we match minimally on metric (concept) and any open keys present
+ merge_cols: list[str] = []
+ if "metric" in datapoint_df.columns and "metric" in instance_df.columns:
+ merge_cols.append("metric")
+ merge_cols.extend(
+ [key for key in open_keys if key in datapoint_df.columns and key in instance_df.columns]
+ )

- # Extract codes from values (vectorized operation)
- mask = table_df[dim_col].notna()
- temp_code_col = f"_{dim_col}_temp_code"
- table_df.loc[mask, temp_code_col] = (
- table_df.loc[mask, dim_col].astype(str).str.split(":").str[-1]
- )
+ def _strip_prefix(val: Any) -> Any:
+ if isinstance(val, str) and ":" in val:
+ return val.split(":", 1)[1]
+ return val

- # Normalize values for each datapoint
- for datapoint, code_map in datapoint_allowed_map.items():
- dp_mask = (table_df["datapoint"] == datapoint) & mask
+ for col in merge_cols:
+ if col in datapoint_df.columns:
+ datapoint_df[col] = datapoint_df[col].map(_strip_prefix)
+ if col in instance_df.columns:
+ instance_df[col] = instance_df[col].map(_strip_prefix)

- if not dp_mask.any():
- continue
+ instance_df = instance_df.copy()
+ instance_df["_idx"] = instance_df.index

- # Store original values for error reporting
- original_values = table_df.loc[dp_mask, dim_col].copy()
-
- # Map codes to correct full values
- normalized_values = table_df.loc[dp_mask, temp_code_col].map(code_map)
-
- # Update only the values that were successfully mapped
- mapped_mask = dp_mask & normalized_values.notna()
- table_df.loc[mapped_mask, dim_col] = normalized_values[mapped_mask]
-
- # Check for values that couldn't be mapped (validation errors)
- unmapped_mask = dp_mask & normalized_values.isna()
- if unmapped_mask.any():
- invalid_codes = table_df.loc[unmapped_mask, temp_code_col].unique()
- valid_codes = list(code_map.keys())
- raise ValueError(
- f"Invalid values for datapoint '{datapoint}' in column '{dim_col}': "
- f"Found codes {list(invalid_codes)} but only {valid_codes} are allowed. "
- f"Original values: {original_values[unmapped_mask].tolist()}"
- )
+ merged_df = pd.merge(datapoint_df, instance_df, on=merge_cols, how="inner")

- # Clean up temporary column
- if temp_code_col in table_df.columns:
- table_df.drop(columns=[temp_code_col], inplace=True)
+ if open_keys:
+ valid_open_keys = [key for key in open_keys if key in merged_df.columns]
+ if valid_open_keys:
+ merged_df.dropna(subset=valid_open_keys, inplace=True)

- return table_df
+ return set(merged_df["_idx"].tolist())

  def _variable_generator(self, table: Table) -> pd.DataFrame:
  """Returns the dataframe with the CSV file for the table
@@ -406,7 +355,7 @@ class Converter:
  )

  # Do the intersection and drop from datapoints the columns and records
- datapoint_df = table.variable_df
+ datapoint_df = table.variable_df.copy()
  missing_cols = list(variable_columns - instance_columns)
  if "data_type" in missing_cols:
  missing_cols.remove("data_type")
@@ -417,10 +366,20 @@ class Converter:

  # Join the dataframes on the datapoint_columns
  merge_cols = list(variable_columns & instance_columns)
- table_df = pd.merge(datapoint_df, instance_df, on=merge_cols, how="inner")

- # Normalize values against allowed_values
- table_df = self._normalize_allowed_values(table_df, datapoint_df)
+ def _strip_prefix(val: Any) -> Any:
+ if isinstance(val, str) and ":" in val:
+ return val.split(":", 1)[1]
+ return val
+
+ # Align merge columns by stripping any namespace prefixes from both sides
+ for col in merge_cols:
+ if col in datapoint_df.columns:
+ datapoint_df[col] = datapoint_df[col].map(_strip_prefix)
+ if col in instance_df.columns:
+ instance_df[col] = instance_df[col].map(_strip_prefix)
+
+ table_df = pd.merge(datapoint_df, instance_df, on=merge_cols, how="inner")

  if "data_type" in table_df.columns and "decimals" in table_df.columns:
  decimals_table = table_df[["decimals", "data_type"]].drop_duplicates()
@@ -432,17 +391,27 @@ class Converter:
  decimals = row["decimals"]

  if data_type not in self._decimals_parameters:
- self._decimals_parameters[data_type] = decimals
+ self._decimals_parameters[data_type] = (
+ int(decimals) if decimals not in {"INF", "#none"} else decimals
+ )
  else:
  # If new value is a special value, skip it (prefer numeric values)
  if decimals in {"INF", "#none"}:
  pass
  # If new value is numeric
  else:
+ try:
+ decimals = int(decimals)
+ except ValueError:
+ raise ValueError(
+ f"Invalid decimals value: {decimals}, "
+ "should be integer, 'INF' or '#none'"
+ )
+
  # If existing value is special, replace with numeric
- if self._decimals_parameters[data_type] in {"INF", "#none"} or (
- isinstance(self._decimals_parameters[data_type], int)
- and decimals < self._decimals_parameters[data_type]
+ if (
+ self._decimals_parameters[data_type] in {"INF", "#none"}
+ or decimals < self._decimals_parameters[data_type]
  ):
  self._decimals_parameters[data_type] = decimals

@@ -497,13 +466,6 @@ class Converter:
  # Defined by the EBA in the JSON files. We take them from the taxonomy
  # Because EBA is using exactly those for the JSON files.

- for open_key in table.open_keys:
- if open_key in datapoints.columns:
- dim_name = mapping_dict.get(open_key)
- # For open keys, there are no dim_names (they are not mapped)
- if dim_name and not datapoints.empty:
- datapoints[open_key] = dim_name + ":" + datapoints[open_key].astype(str)
-
  datapoints.sort_values(by=["datapoint"], ascending=True, inplace=True)
  output_path_table = temp_dir_path / (table.url or "table.csv")

@@ -550,7 +512,7 @@ class Converter:
  if fil_ind.value and fil_ind.table:
  self._reported_tables.append(fil_ind.table)

- def _validate_filing_indicators(self) -> None:
+ def _validate_filing_indicators(self, strict_validation: bool = True) -> None:
  """Validate that no facts are orphaned (belong only to non-reported tables).

  Raises:
@@ -559,44 +521,56 @@ class Converter:
  if self.instance.instance_df is None or self.instance.instance_df.empty:
  return

- # Step 1: Collect indices of facts that belong to ANY reported table
- reported_fact_indices: Set[int] = set()
+ # Step 1: Track which facts belong to ANY reported table without materializing a huge set
+ reported_mask = pd.Series(False, index=self.instance.instance_df.index)
  for table in self.module.tables:
  if table.filing_indicator_code in self._reported_tables:
- instance_df = self._get_instance_df(table)
- if not instance_df.empty:
- # Add all fact indices (DataFrame row indices) to the set
- reported_fact_indices.update(instance_df.index)
+ reported_indices = self._matching_fact_indices(table)
+ if reported_indices:
+ reported_mask.loc[list(reported_indices)] = True

  # Step 2: Find facts that belong ONLY to non-reported tables
- all_orphaned_indices = set()
+ orphaned_mask = pd.Series(False, index=self.instance.instance_df.index)
  orphaned_per_table = {}

  for table in self.module.tables:
  if table.filing_indicator_code not in self._reported_tables:
- instance_df = self._get_instance_df(table)
- if not instance_df.empty:
- # Find facts that are in this table but NOT in any reported table
- orphaned_in_this_table = set(instance_df.index) - reported_fact_indices
+ orphaned_indices = self._matching_fact_indices(table)
+ if orphaned_indices:
+ # Facts in this table that never appear in a reported table
+ orphaned_in_this_table = [
+ idx for idx in orphaned_indices if not reported_mask.loc[idx]
+ ]
  if orphaned_in_this_table:
+ orphaned_mask.loc[orphaned_in_this_table] = True
  orphaned_per_table[table.filing_indicator_code] = len(
  orphaned_in_this_table
  )
- all_orphaned_indices.update(orphaned_in_this_table)

- if all_orphaned_indices:
+ total_orphaned = int(orphaned_mask.sum())
+
+ if total_orphaned:
  error_msg = (
  f"Filing indicator inconsistency detected:\n"
- f"Found {len(all_orphaned_indices)} fact(s) that belong ONLY"
+ f"Found {total_orphaned} fact(s) that belong ONLY"
  f" to non-reported tables:\n"
  )
  for table_code, count in orphaned_per_table.items():
  error_msg += f" - {table_code}: {count} fact(s)\n"
+
+ if strict_validation:
+ error_msg += (
+ "\nThe conversion process will not continue due to strict validation mode. "
+ "Either set filed=true for the relevant tables "
+ "or remove these facts from the XML."
+ )
+ raise ValueError(error_msg)
  error_msg += (
  "\nThese facts will be excluded from the output. "
- "Either set filed=true for the relevant tables or remove these facts from the XML."
+ "Consider setting filed=true for the relevant tables "
+ "or removing these facts from the XML."
  )
- raise ValueError(error_msg)
+ warnings.warn(error_msg)

  def _convert_parameters(self, temp_dir_path: Path) -> None:
  # Workaround;
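The removed _normalize_allowed_values pass is replaced by a simpler alignment step: before the inner merge, namespace prefixes are stripped from the merge columns on both the variable and the instance side, so rows join on the local code alone. A standalone sketch of that idea (the column names and values below are invented for illustration, not taken from the taxonomy):

    import pandas as pd

    def _strip_prefix(val):
        # "eba_AM:x1" and "AM:x1" both reduce to the local code "x1"
        if isinstance(val, str) and ":" in val:
            return val.split(":", 1)[1]
        return val

    # Hypothetical variable definitions and parsed facts that use different prefixes
    datapoint_df = pd.DataFrame({"datapoint": ["dp1"], "AMC": ["eba_AM:x1"]})
    instance_df = pd.DataFrame({"AMC": ["AM:x1"], "value": ["100"]})

    merge_cols = ["AMC"]
    for col in merge_cols:
        datapoint_df[col] = datapoint_df[col].map(_strip_prefix)
        instance_df[col] = instance_df[col].map(_strip_prefix)

    # The rows now join regardless of which prefix each side used
    table_df = pd.merge(datapoint_df, instance_df, on=merge_cols, how="inner")
    print(table_df)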
xbridge/instance.py CHANGED
@@ -13,6 +13,59 @@ from zipfile import ZipFile
  import pandas as pd
  from lxml import etree

+ # Cache namespace → CSV prefix derivations to avoid repeated string work during parse
+ _namespace_prefix_cache: Dict[str, str] = {}
+
+
+ def _derive_csv_prefix(namespace_uri: str) -> Optional[str]:
+ """Derive the fixed CSV prefix from a namespace URI using the EBA convention."""
+ if not namespace_uri:
+ return None
+
+ cached = _namespace_prefix_cache.get(namespace_uri)
+ if cached is not None:
+ return cached
+
+ cleaned = namespace_uri.rstrip("#/")
+ if "#" in namespace_uri:
+ segment = namespace_uri.rsplit("#", 1)[-1]
+ else:
+ segment = cleaned.rsplit("/", 1)[-1] if "/" in cleaned else cleaned
+
+ if not segment:
+ return None
+
+ prefix = f"eba_{segment}"
+ _namespace_prefix_cache[namespace_uri] = prefix
+ return prefix
+
+
+ def _normalize_namespaced_value(
+ value: Optional[str], nsmap: Dict[Optional[str], str]
+ ) -> Optional[str]:
+ """
+ Normalize a namespaced value (e.g., 'dom:qAE' or '{uri}qAE') to the CSV prefix convention.
+ Returns the original value if no namespace can be resolved.
+ """
+ if value is None:
+ return None
+
+ # Clark notation: {uri}local
+ if value.startswith("{") and "}" in value:
+ uri, local = value[1:].split("}", 1)
+ derived = _derive_csv_prefix(uri)
+ return f"{derived}:{local}" if derived else value
+
+ # Prefixed notation: prefix:local
+ if ":" in value:
+ potential_prefix, local = value.split(":", 1)
+ namespace_uri = nsmap.get(potential_prefix)
+ if namespace_uri:
+ derived = _derive_csv_prefix(namespace_uri)
+ return f"{derived}:{local}" if derived else value
+
+ return value
+

  class Instance:
  """
@@ -548,7 +601,7 @@ class Scenario:
  continue
  dimension = dimension_raw.split(":")[1]
  value = self.get_value(child)
- value = value.split(":")[1] if ":" in value else value
+ value = _normalize_namespaced_value(value, child.nsmap) or ""
  self.dimensions[dimension] = value

  @staticmethod
@@ -667,7 +720,7 @@ class Fact:
  def parse(self) -> None:
  """Parse the XML node with the `fact <https://www.xbrl.org/guidance/xbrl-glossary/#:~:text=accounting%20standards%20body.-,Fact,-A%20fact%20is>`_."""
  self.metric = self.fact_xml.tag
- self.value = self.fact_xml.text
+ self.value = _normalize_namespaced_value(self.fact_xml.text, self.fact_xml.nsmap)
  self.decimals = self.fact_xml.attrib.get("decimals")
  self.context = self.fact_xml.attrib.get("contextRef")
  self.unit = self.fact_xml.attrib.get("unitRef")
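The new module-level helpers replace the old "split on the first colon" handling of fact and dimension values during parsing. A rough illustration of the convention they implement, assuming the helpers are imported from xbridge.instance and using an invented namespace URI (real namespace maps come from lxml's element.nsmap during parsing):

    from xbridge.instance import _derive_csv_prefix, _normalize_namespaced_value

    # Hypothetical namespace map for illustration only
    nsmap = {"dom": "http://example.org/xbrl/dict/dom/AE"}

    print(_derive_csv_prefix(nsmap["dom"]))               # "eba_AE" (last URI segment, prefixed with "eba_")
    print(_normalize_namespaced_value("dom:qAE", nsmap))  # "eba_AE:qAE" (prefix resolved through nsmap)
    print(_normalize_namespaced_value("{http://example.org/xbrl/dict/dom/AE}qAE", nsmap))  # "eba_AE:qAE" (Clark notation)
    print(_normalize_namespaced_value("12345.67", nsmap)) # unchanged: no resolvable namespace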