edges 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of edges might be problematic.

edges/flow_matching.py CHANGED
@@ -2,15 +2,16 @@ from collections import defaultdict
  from functools import lru_cache
  import numpy as np
  from copy import deepcopy
- import json
+ import json, time
  from typing import NamedTuple, List, Optional

- from .utils import make_hashable
+ from .utils import make_hashable, _short_cf, _head


  import logging

  logger = logging.getLogger(__name__)
+ logger.addHandler(logging.NullHandler())


  def preprocess_cfs(cf_list, by="consumer"):
@@ -99,11 +100,13 @@ def process_cf_list(
                  best_cf = cf

          if best_cf:
-             logger.debug(f"Best matching CF selected with score {best_score}: {best_cf}")
+             logger.debug("Best matching CF selected with score %d: %s", best_score, best_cf)
              results.append(best_cf)
          else:
              logger.debug(
-                 f"No matching CF found for supplier {filtered_supplier} and consumer {filtered_consumer}."
+                 "No matching CF found for supplier %s and consumer %s.",
+                 filtered_supplier,
+                 filtered_consumer,
              )

      return results
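The recurring pattern in this release is the switch from eager f-string logging to lazy %-style arguments, so messages are only formatted when a handler actually emits the record. A self-contained sketch of the difference, independent of the package code:

    import logging

    logger = logging.getLogger("example")
    cf = {"value": 1.0}

    logger.debug(f"Best matching CF: {cf}")   # f-string is evaluated even when DEBUG is disabled
    logger.debug("Best matching CF: %s", cf)  # formatting is deferred until the record is emitted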
@@ -577,116 +580,161 @@ def compute_average_cf(
      required_consumer_fields: set = None,
  ) -> tuple[str | float, Optional[dict], Optional[dict]]:
      """
-     Compute weighted CF and a **canonical** aggregated uncertainty for composite regions.
-     No sampling is performed here.
-     Returns:
-         expr_or_value: str | float
-         matched_cf_obj: Optional[dict] # present only when exactly 1 CF matched
-         agg_uncertainty: Optional[dict] # discrete_empirical mixture if >1 CF
+     Compute weighted CF and a canonical aggregated uncertainty for composite regions.
+     Returns: (expr_or_value, matched_cf_obj|None, agg_uncertainty|None)
      """
+     # Optional timing (only if DEBUG)
+     _t0 = time.perf_counter() if logger.isEnabledFor(logging.DEBUG) else None
+
      if not candidate_suppliers and not candidate_consumers:
-         logger.debug("No candidate suppliers or consumers provided.")
+         logger.warning(
+             "CF-AVG: no candidate locations provided | supplier_cands=%s | consumer_cands=%s",
+             candidate_suppliers,
+             candidate_consumers,
+         )
          return 0, None, None

+     # -------- Gate 1: location-key presence in cf_index --------
      valid_location_pairs = [
          (s, c)
          for s in candidate_suppliers
          for c in candidate_consumers
          if cf_index.get((s, c))
      ]
+
      if not valid_location_pairs:
-         logger.debug(
-             f"No valid location pairs found for suppliers {candidate_suppliers} "
-             f"and consumers {candidate_consumers}."
-         )
+         if logger.isEnabledFor(logging.DEBUG):
+             # show small sample of what keys do exist for quick diagnosis
+             some_keys = _head(cf_index.keys(), 10)
+             logger.debug(
+                 "CF-AVG: no (supplier,consumer) keys in cf_index for candidates "
+                 "| suppliers=%s | consumers=%s | sample_index_keys=%s",
+                 _head(candidate_suppliers),
+                 _head(candidate_consumers),
+                 some_keys,
+             )
          return 0, None, None
+     else:
+         if logger.isEnabledFor(logging.DEBUG):
+             logger.debug(
+                 "CF-AVG: %d valid (s,c) keys found (showing up to 10): %s",
+                 len(valid_location_pairs),
+                 _head(valid_location_pairs, 10),
+             )

+     # Build field-filtered views (exclude location; added per-loop)
      filtered_supplier = {
          k: supplier_info[k]
-         for k in required_supplier_fields
+         for k in (required_supplier_fields or ())
          if k in supplier_info and k != "location"
      }
      filtered_consumer = {
          k: consumer_info[k]
-         for k in required_consumer_fields
+         for k in (required_consumer_fields or ())
          if k in consumer_info and k != "location"
      }

+     # -------- Gate 2: field/operator/classification match --------
      matched = []
+     total_candidates_seen = 0
+
      for s_loc, c_loc in valid_location_pairs:
-         cands = cf_index.get((s_loc, c_loc))
-         if not cands:
-             continue
+         cands = cf_index.get((s_loc, c_loc)) or []
+         total_candidates_seen += len(cands)
+
          filtered_supplier["location"] = s_loc
          filtered_consumer["location"] = c_loc
-         matched.extend(process_cf_list(cands, filtered_supplier, filtered_consumer))
+
+         got = process_cf_list(cands, filtered_supplier, filtered_consumer)
+         if logger.isEnabledFor(logging.DEBUG) and got:
+             logger.debug(
+                 "CF-AVG: matched %d/%d CFs @ (%s,%s); example=%s",
+                 len(got),
+                 len(cands),
+                 s_loc,
+                 c_loc,
+                 _short_cf(got[0]),
+             )
+         matched.extend(got)

      if not matched:
-         logger.debug(
-             f"No matched CFs for supplier {supplier_info} and consumer {consumer_info} "
-             f"with location pairs {valid_location_pairs}."
-         )
+         if logger.isEnabledFor(logging.DEBUG):
+             logger.debug(
+                 "CF-AVG: 0 CFs matched after field/classification checks "
+                 "| supplier_info=%s | consumer_info=%s | pairs=%s | total_candidates_seen=%d",
+                 supplier_info,
+                 consumer_info,
+                 _head(valid_location_pairs, 10),
+                 total_candidates_seen,
+             )
          return 0, None, None

+     # Weights
      total_w = sum(cf.get("weight", 0.0) for cf in matched)
      if total_w == 0:
          logger.warning(
-             f"No valid weights found for supplier {supplier_info} and consumer {consumer_info}. "
-             "Using equal shares."
+             "CF-AVG: weights all zero/missing using equal shares | matched=%d | example=%s",
+             len(matched),
+             _short_cf(matched[0]) if matched else None,
          )
          matched_cfs = [(cf, 1.0 / len(matched)) for cf in matched]
      else:
          matched_cfs = [(cf, cf.get("weight", 0.0) / total_w) for cf in matched]

-     assert np.isclose(
-         sum(s for _, s in matched_cfs), 1.0
-     ), f"Total shares must equal 1. Got: {sum(s for _, s in matched_cfs)}"
+     # Safety check on weights; log before assert explodes
+     share_sum = sum(s for _, s in matched_cfs)
+     if logger.isEnabledFor(logging.DEBUG):
+         logger.debug(
+             "CF-AVG: matched=%d | sum_shares=%.6f | example=%s",
+             len(matched_cfs),
+             share_sum,
+             _short_cf(matched_cfs[0][0]) if matched_cfs else None,
+         )
+
+     assert np.isclose(share_sum, 1.0), f"Total shares must equal 1. Got: {share_sum}"

-     # Weighted expression for deterministic path
+     # Build deterministic expression (string)
      expressions = [f"({share:.3f} * ({cf['value']}))" for cf, share in matched_cfs]
      expr = " + ".join(expressions)

-     # === NEW: aggregated uncertainty as a hierarchical mixture (no sampling) ===
-     # If only one CF: pass through its uncertainty directly
+     # Single CF shortcut (pass-through uncertainty)
      if len(matched_cfs) == 1:
          single_cf = matched_cfs[0][0]
          agg_uncertainty = single_cf.get("uncertainty")
+         if logger.isEnabledFor(logging.DEBUG):
+             dt = (time.perf_counter() - _t0) if _t0 else None
+             logger.debug(
+                 "CF-AVG: single CF path | expr=%s | has_unc=%s | dt=%.3f ms",
+                 expr,
+                 bool(agg_uncertainty),
+                 (dt * 1000.0) if dt else -1.0,
+             )
          return (expr, single_cf, agg_uncertainty)

+     # Multi-CF aggregated uncertainty
      def _cf_sign(cf_obj) -> int | None:
-         """Infer sign from uncertainty.negative if present, else from numeric value."""
          neg = (cf_obj.get("uncertainty") or {}).get("negative", None)
          if neg in (0, 1):
              return -1 if neg == 1 else +1
          v = cf_obj.get("value")
          if isinstance(v, (int, float)):
-             if v < 0:
-                 return -1
-             if v > 0:
-                 return +1
-             return None # unknown (e.g., string expr)
+             return -1 if v < 0 else (+1 if v > 0 else None)
+         return None

-     # Try to determine a single aggregate sign across constituents
      cf_signs = [s for (cf, _sh) in matched_cfs if (s := _cf_sign(cf)) is not None]
      agg_sign = (
          cf_signs[0] if (cf_signs and all(s == cf_signs[0] for s in cf_signs)) else None
      )

-     # Build child magnitude distributions (no sampling)
-     child_values = []
-     child_weights = []
-
+     child_values, child_weights = [], []
      for cf, share in matched_cfs:
          if share <= 0:
              continue
-
          if cf.get("uncertainty") is not None:
-             # Copy child's uncertainty and strip sign to make it magnitude-only
              u = deepcopy(cf["uncertainty"])
              u["negative"] = 0
              child_unc = u
          else:
-             # Deterministic child → wrap as a 1-point discrete over |value|
              v = cf.get("value")
              if isinstance(v, (int, float)):
                  child_unc = {
@@ -695,43 +743,49 @@ def compute_average_cf(
                      "negative": 0,
                  }
              else:
-                 # We can’t build a magnitude distribution from a symbolic string deterministically.
-                 # Be conservative: skip aggregated-uncertainty so MC still works (deterministic anyway).
+                 if logger.isEnabledFor(logging.DEBUG):
+                     logger.debug(
+                         "CF-AVG: skip agg-unc (symbolic child without unc) | child=%s",
+                         _short_cf(cf),
+                     )
                  return (expr, None, None)
-
          child_values.append(child_unc)
          child_weights.append(float(share))

-     # Normalize weights and optionally canonicalize order for stable keys
      wsum = sum(child_weights) or 1.0
      child_weights = [w / wsum for w in child_weights]

-     # (Optional but helpful) canonicalize order by serialized child value to stabilize cache keys
      ordering = sorted(
          range(len(child_values)),
          key=lambda i: json.dumps(child_values[i], sort_keys=True),
      )
      child_values = [child_values[i] for i in ordering]
      child_weights = [child_weights[i] for i in ordering]
-     # remove values and weights where weights == 0
+
      filtered = [
          (v, w) for v, w in zip(child_values, child_weights) if w > 0 and v is not None
      ]
      if not filtered:
+         if logger.isEnabledFor(logging.DEBUG):
+             logger.debug("CF-AVG: filtered children empty after cleanup.")
          return 0, None, None
      child_values, child_weights = zip(*filtered)

      agg_uncertainty = {
          "distribution": "discrete_empirical",
-         "parameters": {
-             "values": child_values, # each is a distribution dict (magnitude-only)
-             "weights": child_weights, # shares
-         },
-         # IMPORTANT: the sign is carried only at the top level so uptake/release share the same cache key
-         # make_distribution_key() already ignores "negative"
+         "parameters": {"values": list(child_values), "weights": list(child_weights)},
      }
      if agg_sign is not None:
          agg_uncertainty["negative"] = 1 if agg_sign == -1 else 0

-     # Multi-CF mixture → no single "matched_cf_obj"
+     if logger.isEnabledFor(logging.DEBUG):
+         dt = (time.perf_counter() - _t0) if _t0 else None
+         logger.debug(
+             "CF-AVG: success | children=%d | expr_len=%d | agg_sign=%s | dt=%.3f ms",
+             len(child_values),
+             len(expr),
+             agg_sign,
+             (dt * 1000.0) if dt else -1.0,
+         )
+
      return (expr, None, agg_uncertainty)
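For orientation, the aggregated uncertainty returned on the multi-CF path is a plain "discrete_empirical" mixture dictionary whose top-level keys appear in the hunk above. A hedged sketch of its shape with two made-up children (the child distributions and numbers are illustrative only, not taken from any shipped characterization method):

    # Hypothetical two-child mixture; only the key layout mirrors the code above.
    agg_uncertainty = {
        "distribution": "discrete_empirical",
        "parameters": {
            # each entry in "values" is itself a magnitude-only distribution dict
            "values": [
                {"distribution": "uniform", "parameters": {"minimum": 0.5, "maximum": 1.5}, "negative": 0},
                {"distribution": "discrete_empirical", "parameters": {"values": [0.8], "weights": [1.0]}, "negative": 0},
            ],
            "weights": [0.6, 0.4],  # normalized shares, summing to 1
        },
        "negative": 0,  # sign carried only at the top level
    }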
edges/georesolver.py CHANGED
@@ -5,23 +5,15 @@ import logging
  from constructive_geometries import Geomatcher
  from .utils import load_missing_geographies, get_str

+ logger = logging.getLogger(__name__)
+ logger.addHandler(logging.NullHandler())
+

  class GeoResolver:
      def __init__(self, weights: dict):
          self.weights = {get_str(k): v for k, v in weights.items()}
          self.weights_key = ",".join(sorted(self.weights.keys()))
-         self.logger = logging.getLogger("GeoResolver")
-         self.logger.setLevel(logging.DEBUG)
-
-         if not self.logger.handlers:
-             fh = logging.FileHandler("georesolver.log")
-             formatter = logging.Formatter(
-                 "%(asctime)s %(name)s %(levelname)s: %(message)s", datefmt="%H:%M:%S"
-             )
-             fh.setFormatter(formatter)
-             fh.setLevel(logging.DEBUG)
-             self.logger.addHandler(fh)
-             self.logger.propagate = False
+         self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")

          # Dependencies from constructive_geometries and your utils
          self.geo = Geomatcher()
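Both logging changes in this file follow the usual library convention of leaving handler choice to the application: the module logger only gets a NullHandler, and the per-instance file handler is replaced by a namespaced child logger ("edges.georesolver.GeoResolver") that simply propagates upward. A minimal application-side sketch (handler and level are arbitrary choices, not part of the package):

    import logging

    # Attach any handler to the package logger; child loggers such as
    # "edges.georesolver.GeoResolver" propagate their records up to it.
    pkg_logger = logging.getLogger("edges")
    pkg_logger.addHandler(logging.StreamHandler())
    pkg_logger.setLevel(logging.INFO)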
@@ -69,7 +61,7 @@ class GeoResolver:
                  if not containing:
                      break
              except KeyError:
-                 self.logger.info(f"Region: {location}. No geometry found.")
+                 self.logger.info("Region %s: no geometry found.", location)

          return results

edges/logging_config.py ADDED
@@ -0,0 +1,41 @@
+ from pathlib import Path
+ import logging
+ import logging.config
+
+
+ def setup_package_logging(
+     log_path: Path = Path("edges.log"), level: int = logging.INFO
+ ) -> None:
+     """
+     Route all logs from the 'edges' package (and submodules) into one file.
+     """
+     logging.config.dictConfig(
+         {
+             "version": 1,
+             "disable_existing_loggers": False,
+             "formatters": {
+                 "standard": {
+                     "format": "%(asctime)s | %(levelname)s | %(name)s | %(message)s",
+                     "datefmt": "%Y-%m-%d %H:%M:%S",
+                 }
+             },
+             "handlers": {
+                 "edges_file": {
+                     "class": "logging.FileHandler",
+                     "filename": str(log_path),
+                     "mode": "w",
+                     "encoding": "utf-8",
+                     "formatter": "standard",
+                     "level": level,
+                 }
+             },
+             "loggers": {
+                 "edges": {
+                     "handlers": ["edges_file"],
+                     "level": level,
+                     "propagate": False,
+                 }
+             },
+             "root": {"level": "WARNING", "handlers": []},
+         }
+     )
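A usage sketch for the new module (assuming it is importable as edges.logging_config, which matches the path listed in RECORD; the file name and level below are arbitrary):

    from pathlib import Path
    import logging

    from edges.logging_config import setup_package_logging

    # Route every record from the "edges" package and its submodules into one file.
    setup_package_logging(log_path=Path("edges_debug.log"), level=logging.DEBUG)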
edges/uncertainty.py CHANGED
@@ -7,13 +7,20 @@ import json
  from copy import deepcopy
  from scipy import stats
  import hashlib
+ import logging

  from edges.utils import safe_eval


+ logger = logging.getLogger(__name__)
+ logger.addHandler(logging.NullHandler())
+
+
  def get_rng_for_key(key: str, base_seed: int) -> np.random.Generator:
      key_digest = int(hashlib.sha256(key.encode()).hexdigest(), 16) % (2**32)
-     return np.random.default_rng(base_seed + key_digest)
+     seed = base_seed + key_digest
+     logger.debug("Creating RNG with derived seed %d for key %s", seed, key)
+     return np.random.default_rng(seed)


  def make_distribution_key(cf):
@@ -25,6 +32,7 @@ def make_distribution_key(cf):
          return json.dumps(unc_copy, sort_keys=True)
      else:
          # No uncertainty block → return None = skip caching
+         logger.debug("No uncertainty block present; skipping cache key.")
          return None


@@ -107,7 +115,13 @@ def sample_cf_distribution(
          if dist_name == "discrete_empirical":
              values = params["values"]
              weights = np.array(params["weights"])
-             weights = weights / weights.sum() if weights.sum() != 0 else weights
+             if weights.sum() == 0:
+                 logger.warning(
+                     "All weights are zero in discrete_empirical; using equal weights."
+                 )
+                 weights = np.ones_like(weights, dtype=float) / len(weights)
+             else:
+                 weights = weights / weights.sum()

              chosen_indices = random_state.choice(len(values), size=n, p=weights)

@@ -184,9 +198,15 @@ def sample_cf_distribution(
              samples = np.clip(samples, params["minimum"], params["maximum"])

          else:
+             logger.warning(
+                 "Unknown distribution '%s'; falling back to constant value.", dist_name
+             )
              samples = np.full(n, cf["value"], dtype=float)

      except ValueError as e:
+         logger.error(
+             "Error sampling distribution '%s' with parameters %s", dist_name, params
+         )
          raise ValueError(
              f"Error sampling distribution '{dist_name}' with parameters {params}: {e}"
          )
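The refactor of get_rng_for_key above keeps its behaviour: the seed is still derived from a SHA-256 digest of the key plus the base seed, so the same (key, base_seed) pair always produces the same stream. A quick sketch (the key string below is arbitrary):

    import numpy as np
    from edges.uncertainty import get_rng_for_key

    rng_a = get_rng_for_key("some-distribution-key", base_seed=42)
    rng_b = get_rng_for_key("some-distribution-key", base_seed=42)
    assert np.array_equal(rng_a.normal(size=3), rng_b.normal(size=3))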
edges/utils.py CHANGED
@@ -35,6 +35,7 @@ from .filesystem_constants import DATA_DIR


  logger = logging.getLogger(__name__)
+ logger.addHandler(logging.NullHandler())

  _eval_cache = {}

@@ -130,23 +131,41 @@ def add_population_and_gdp_data(data: list, weight: str) -> list:
      # load population data from data/population.yaml

      if weight == "population":
-         with open(
-             DATA_DIR / "metadata" / "population.yaml", "r", encoding="utf-8"
-         ) as f:
-             weighting_data = yaml.safe_load(f)
+         path = DATA_DIR / "metadata" / "population.yaml"
+         try:
+             with open(path, "r", encoding="utf-8") as f:
+                 weighting_data = yaml.safe_load(f)
+         except FileNotFoundError:
+             logger.error("Population metadata file not found at %s", path)
+             raise

      # load GDP data from data/gdp.yaml
      if weight == "gdp":
-         with open(DATA_DIR / "metadata" / "gdp.yaml", "r", encoding="utf-8") as f:
-             weighting_data = yaml.safe_load(f)
+         path = DATA_DIR / "metadata" / "gdp.yaml"
+         try:
+             with open(path, "r", encoding="utf-8") as f:
+                 weighting_data = yaml.safe_load(f)
+         except FileNotFoundError:
+             logger.error("GDP metadata file not found at %s", path)
+             raise

      # add to the data dictionary
+     missing = 0
      for cf in data:
          for category in ["consumer", "supplier"]:
              if "location" in cf[category]:
                  if "weight" not in cf:
                      k = cf[category]["location"]
-                     cf["weight"] = weighting_data.get(k, 0)
+                     w = weighting_data.get(k, 0)
+                     if not w:
+                         missing += 1
+                     cf["weight"] = w
+     if missing:
+         logger.warning(
+             "Added weights with %d missing entries (defaulted to 0) for weight='%s'",
+             missing,
+             weight,
+         )

      return data

@@ -207,6 +226,7 @@ def get_flow_matrix_positions(mapping: dict) -> list:
      # Batch retrieve flows using get_activities() (assumed available in bw2data)
      keys = list(mapping.keys())
      flows_objs = get_activities(keys)
+     logger.debug("Resolved %d flow objects for %d keys", len(flows_objs), len(keys))

      # Build a lookup mapping both the numeric ID (if available) and (database, code)
      # tuple to the original flow object.
@@ -229,6 +249,7 @@ def get_flow_matrix_positions(mapping: dict) -> list:
                  flow = f
                  break
          if flow is None:
+             logger.error("Flow with key %s not found in fetched objects", k)
              raise KeyError(f"Flow with key {k} not found.")
          data = normalize_flow(flow)
          result.append(
@@ -304,6 +325,12 @@ def get_activities(keys, **kwargs):
          nodes.append(obj)

      if len(nodes) != len(keys):
+         logger.error(
+             "Requested %d activities but found %d. Keys (sample): %s",
+             len(keys),
+             len(nodes),
+             keys[:5],
+         )
          raise Exception("Not all requested activity objects were found.")

      return nodes
@@ -412,3 +439,26 @@ def assert_no_nans_in_cf_list(cf_list: list[dict], file_source: str = "<input>")
                  f"NaN detected in {side} field '{k}' of CF at index {i} "
                  f"in {file_source}: {entry}. This field must be removed or filled."
              )
+
+
+ def _head(seq, n=8):
+     try:
+         seq = list(seq)
+         return seq[:n] + (["…"] if len(seq) > n else [])
+     except Exception:
+         return seq
+
+
+ def _short_cf(cf: dict, maxlen=160):
+     """Compact view of a CF for logs."""
+     try:
+         core = {
+             "value": cf.get("value"),
+             "weight": cf.get("weight"),
+             "supplier_loc": cf.get("supplier", {}).get("location"),
+             "consumer_loc": cf.get("consumer", {}).get("location"),
+         }
+         s = json.dumps(core, sort_keys=True)
+         return (s[: maxlen - 1] + "…") if len(s) > maxlen else s
+     except Exception:
+         return str(cf)[:maxlen]
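The two private helpers added above exist only to keep debug lines compact; a sketch with made-up data (they are underscore-prefixed internals, so the import is shown for illustration only):

    from edges.utils import _head, _short_cf

    cf = {"value": 1.7, "weight": 0.25, "supplier": {"location": "FR"}, "consumer": {"location": "CH"}}

    print(_head(range(20), 5))  # -> [0, 1, 2, 3, 4, '…']
    print(_short_cf(cf))        # -> compact, sorted JSON with value/weight/locations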
edges-1.0.0.dist-info/METADATA → edges-1.0.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: edges
- Version: 1.0.0
+ Version: 1.0.2
  Summary: Country-specific characterization factors for the Brightway LCA framework
  Author-email: Romain Sacchi <romain.sacchi@psi.ch>, Alvaro Hahn Menacho <alvaro.hahn-menacho@psi.ch>
  Maintainer-email: Romain Sacchi <romain.sacchi@psi.ch>
edges-1.0.0.dist-info/RECORD → edges-1.0.2.dist-info/RECORD CHANGED
@@ -1,17 +1,19 @@
- edges/__init__.py,sha256=ngeGJD83FAbjQZE0h4G30BSt0rlGEWW3J4sVBgODAoo,348
+ edges/__init__.py,sha256=TqfojQSsRPk1P355Hwv7vXTnJ4UkUXmp1tL8pk-_094,398
  edges/analysis.py,sha256=FkJzqFkormYOzhEP5p7YGsmxwN66JdMfktokGRUKjKI,7388
  edges/costs.py,sha256=OV46FS627fw6BikbRvtWWdXRvt4oRl8zsA9-Z8bdOkU,23399
- edges/edgelcia.py,sha256=y0oaMQEQ9P49-mfL5gjgvgkVsrbJHM_Rwt2qYDkBjBg,102259
+ edges/edgelcia.py,sha256=qQO1fAbZwBQ52rZ1h2212OfZMKdWuIsLjMxmqPbk2v4,109472
  edges/filesystem_constants.py,sha256=FSH8hW84V1MCMvo8UOeZKM5Up26ls6bLbTRvfMl2tAg,207
- edges/flow_matching.py,sha256=yhpE0lmrZBv_FJS9SFc2qiuNU6PJs9TdYnAAxdvP70E,25122
- edges/georesolver.py,sha256=07a4e8heYG-wyUIWbFZ9VU6DsI9JrBIwa7aPF2-1-MA,3663
+ edges/flow_matching.py,sha256=ElG7S6-MUM52fcNgbVarpOu5JlF1lLRu_7FRVIniOdk,26841
+ edges/georesolver.py,sha256=a_RDfd0hXgrJAlqui4aLU8RrNLd0HUw3fxyc2TzaM1c,3328
+ edges/logging_config.py,sha256=zZLxKsh7on0v9HaGBp45ynqTrEuDa3wPgMvTognoum0,1231
  edges/matrix_builders.py,sha256=vXl80pKfIeXWkYkTJsD4c8aVrFexWncqwl1saA0gRCM,1800
- edges/uncertainty.py,sha256=VyVtqR5VCi5awHzK9dLEXM4n7NQFszSt_1axn0D60Gw,6938
- edges/utils.py,sha256=1anorkm36qA7UZGRwKG7_28MAkobZpvj-VWon04prg4,13248
+ edges/uncertainty.py,sha256=BfeFFv-qiTYerfKZ2qmql_Y5ps_0-lTqiLdeh9ixMQE,7681
+ edges/utils.py,sha256=boVrCOeq4RZjbQ8u-xPMPdiE3uYwXBVfLqRO6NuYkBI,14868
  edges/data/AWARE 2.0_Country_all_yearly.json,sha256=n84gbYCsF5USg4NHePhSvlVdv0geTgtd-epfCu_Yo48,13187497
  edges/data/AWARE 2.0_Country_irri_yearly.json,sha256=Dz3uV2_6sHXeZWYEfwDTDXa2IDd8LhQKXtK2_HW5iME,3068174
  edges/data/AWARE 2.0_Country_non_irri_yearly.json,sha256=ORrik2I8cADIDbLq22o6UXxETSTVYCxwM-zn9q9OcDE,4502866
  edges/data/AWARE 2.0_Country_unspecified_yearly.json,sha256=ZqfBd3A3nMMcxSXMxBQ-FMwdwGcIf26GDOZPJZZXjzs,3806191
+ edges/data/GeoPolRisk_elementary flows_2024.json,sha256=vC8qdxDXzX7OZQZZvQ3YMJRYTztsXk9bo9rXbRiyuok,18249
  edges/data/GeoPolRisk_paired_2024.json,sha256=epYBpA-uYtjf1Fb5z5rnvjjyTYsc2FzOkt8rQA2oB1k,21542889
  edges/data/ImpactWorld+ 2.1_Freshwater acidification_damage.json,sha256=ICpDkcn0VFD86hRdFxQWotIHvXI96ayzB6EMdTthvAY,7347463
  edges/data/ImpactWorld+ 2.1_Freshwater acidification_midpoint.json,sha256=ecx4eLu88mmT5XlqbVeC-xeWpiyEJJ3vRy31_oCBr9E,7362861
@@ -63,7 +65,7 @@ edges/data/SCP_1.0.json,sha256=s0bRZ-IVfUTMnm3F2yMsT-lDSaww1sMviQIOE7xMNJA,60068
  edges/data/metadata/gdp.yaml,sha256=_I13ZlffUigydP74iGA6eTTNmZdup70KP1GiFQkxRDQ,5509
  edges/data/metadata/missing_geographies.yaml,sha256=eTzO-I642ecG49_nn03fGPgWl-60VCYstWLjGk4h0i8,33
  edges/data/metadata/population.yaml,sha256=BnNaYnx2SrfJLf1lf1-fKfG9JIilC95StgJiCu2so3I,4769
- edges-1.0.0.dist-info/METADATA,sha256=Ab50iDqNUtJlJrrND0oFnBo5c_jkm9cfrLbZcdBpibQ,9974
- edges-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- edges-1.0.0.dist-info/top_level.txt,sha256=85BmdR7cFR5dfKfZ4mdJ4omGk9t4C89GE6N0RS3CCGA,6
- edges-1.0.0.dist-info/RECORD,,
+ edges-1.0.2.dist-info/METADATA,sha256=5J6EYmBCgZLCKfxqDl7qUkco5FIJvsw-xP0tcGv3ifQ,9974
+ edges-1.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ edges-1.0.2.dist-info/top_level.txt,sha256=85BmdR7cFR5dfKfZ4mdJ4omGk9t4C89GE6N0RS3CCGA,6
+ edges-1.0.2.dist-info/RECORD,,