halib 0.1.65__tar.gz → 0.1.66__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. {halib-0.1.65 → halib-0.1.66}/PKG-INFO +2 -2
  2. {halib-0.1.65 → halib-0.1.66}/README.md +1 -1
  3. halib-0.1.66/halib/research/metrics.py +121 -0
  4. {halib-0.1.65 → halib-0.1.66}/halib/research/perfcalc.py +54 -145
  5. {halib-0.1.65 → halib-0.1.66}/halib.egg-info/PKG-INFO +2 -2
  6. {halib-0.1.65 → halib-0.1.66}/halib.egg-info/SOURCES.txt +1 -0
  7. {halib-0.1.65 → halib-0.1.66}/setup.py +1 -1
  8. {halib-0.1.65 → halib-0.1.66}/.gitignore +0 -0
  9. {halib-0.1.65 → halib-0.1.66}/GDriveFolder.txt +0 -0
  10. {halib-0.1.65 → halib-0.1.66}/LICENSE.txt +0 -0
  11. {halib-0.1.65 → halib-0.1.66}/MANIFEST.in +0 -0
  12. {halib-0.1.65 → halib-0.1.66}/guide_publish_pip.pdf +0 -0
  13. {halib-0.1.65 → halib-0.1.66}/halib/__init__.py +0 -0
  14. {halib-0.1.65 → halib-0.1.66}/halib/common.py +0 -0
  15. {halib-0.1.65 → halib-0.1.66}/halib/cuda.py +0 -0
  16. {halib-0.1.65 → halib-0.1.66}/halib/filetype/__init__.py +0 -0
  17. {halib-0.1.65 → halib-0.1.66}/halib/filetype/csvfile.py +0 -0
  18. {halib-0.1.65 → halib-0.1.66}/halib/filetype/jsonfile.py +0 -0
  19. {halib-0.1.65 → halib-0.1.66}/halib/filetype/textfile.py +0 -0
  20. {halib-0.1.65 → halib-0.1.66}/halib/filetype/videofile.py +0 -0
  21. {halib-0.1.65 → halib-0.1.66}/halib/filetype/yamlfile.py +0 -0
  22. {halib-0.1.65 → halib-0.1.66}/halib/online/__init__.py +0 -0
  23. {halib-0.1.65 → halib-0.1.66}/halib/online/gdrive.py +0 -0
  24. {halib-0.1.65 → halib-0.1.66}/halib/online/gdrive_mkdir.py +0 -0
  25. {halib-0.1.65 → halib-0.1.66}/halib/online/gdrive_test.py +0 -0
  26. {halib-0.1.65 → halib-0.1.66}/halib/online/projectmake.py +0 -0
  27. {halib-0.1.65 → halib-0.1.66}/halib/research/__init__.py +0 -0
  28. {halib-0.1.65 → halib-0.1.66}/halib/research/dataset.py +0 -0
  29. {halib-0.1.65 → halib-0.1.66}/halib/research/perftb.py +0 -0
  30. {halib-0.1.65 → halib-0.1.66}/halib/research/plot.py +0 -0
  31. {halib-0.1.65 → halib-0.1.66}/halib/research/torchloader.py +0 -0
  32. {halib-0.1.65 → halib-0.1.66}/halib/research/wandb_op.py +0 -0
  33. {halib-0.1.65 → halib-0.1.66}/halib/rich_color.py +0 -0
  34. {halib-0.1.65 → halib-0.1.66}/halib/system/__init__.py +0 -0
  35. {halib-0.1.65 → halib-0.1.66}/halib/system/cmd.py +0 -0
  36. {halib-0.1.65 → halib-0.1.66}/halib/system/filesys.py +0 -0
  37. {halib-0.1.65 → halib-0.1.66}/halib/utils/__init__.py +0 -0
  38. {halib-0.1.65 → halib-0.1.66}/halib/utils/dataclass_util.py +0 -0
  39. {halib-0.1.65 → halib-0.1.66}/halib/utils/dict_op.py +0 -0
  40. {halib-0.1.65 → halib-0.1.66}/halib/utils/gpu_mon.py +0 -0
  41. {halib-0.1.65 → halib-0.1.66}/halib/utils/listop.py +0 -0
  42. {halib-0.1.65 → halib-0.1.66}/halib/utils/tele_noti.py +0 -0
  43. {halib-0.1.65 → halib-0.1.66}/halib/utils/video.py +0 -0
  44. {halib-0.1.65 → halib-0.1.66}/halib.egg-info/dependency_links.txt +0 -0
  45. {halib-0.1.65 → halib-0.1.66}/halib.egg-info/requires.txt +0 -0
  46. {halib-0.1.65 → halib-0.1.66}/halib.egg-info/top_level.txt +0 -0
  47. {halib-0.1.65 → halib-0.1.66}/setup.cfg +0 -0
{halib-0.1.65 → halib-0.1.66}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: halib
-Version: 0.1.65
+Version: 0.1.66
 Summary: Small library for common tasks
 Author: Hoang Van Ha
 Author-email: hoangvanhauit@gmail.com
@@ -52,7 +52,7 @@ Dynamic: summary
 
 Helper package for coding and automation
 
-**Version 0.1.65**
+**Version 0.1.66**
 
 + now use `uv` for venv management
 + `research/perfcalc`: support both torchmetrics and custom metrics for performance calculation
{halib-0.1.65 → halib-0.1.66}/README.md
@@ -1,6 +1,6 @@
 Helper package for coding and automation
 
-**Version 0.1.65**
+**Version 0.1.66**
 
 + now use `uv` for venv management
 + `research/perfcalc`: support both torchmetrics and custom metrics for performance calculation
halib-0.1.66/halib/research/metrics.py
@@ -0,0 +1,121 @@
+# -------------------------------
+# Metrics Backend Interface
+# -------------------------------
+import inspect
+from typing import Dict, Union, List, Any
+from abc import ABC, abstractmethod
+
+class MetricsBackend(ABC):
+    """Interface for pluggable metrics computation backends."""
+
+    def __init__(self, metrics_info: Union[List[str], Dict[str, Any]]):
+        """
+        Initialize the backend with optional metrics_info.
+        """
+        self.metric_info = metrics_info
+        self.validate_metrics_info(self.metric_info)
+
+    @property
+    def metric_names(self) -> List[str]:
+        """
+        Return a list of metric names.
+        If metric_info is a dict, return its keys; if it's a list, return it directly.
+        """
+        if isinstance(self.metric_info, dict):
+            return list(self.metric_info.keys())
+        elif isinstance(self.metric_info, list):
+            return self.metric_info
+        else:
+            raise TypeError("metric_info must be a list or a dict")
+
+    def validate_metrics_info(self, metrics_info):
+        if isinstance(metrics_info, list):
+            return metrics_info
+        elif isinstance(metrics_info, dict):
+            return {k: v for k, v in metrics_info.items() if isinstance(k, str)}
+        else:
+            raise TypeError(
+                "metrics_info must be a list of strings or a dict with string keys"
+            )
+
+    @abstractmethod
+    def compute_metrics(
+        self, metrics_info: Union[List[str], Dict[str, Any]], metrics_data_dict: Dict[str, Any], *args, **kwargs
+    ) -> Dict[str, Any]:
+        pass
+
+    def calc_metrics(
+        self, metrics_data_dict: Dict[str, Any], *args, **kwargs
+    ) -> Dict[str, Any]:
+        """
+        Calculate metrics based on the provided metrics_info and data.
+        This method should be overridden by subclasses to implement specific metric calculations.
+        """
+        # prevalidate the metrics_data_dict
+        for metric in self.metric_names:
+            if metric not in metrics_data_dict:
+                raise ValueError(f"Metric '{metric}' not found in provided data.")
+        # Call the abstract method to compute metrics
+        return self.compute_metrics(self.metric_info, metrics_data_dict, *args, **kwargs)
+
+class TorchMetricsBackend(MetricsBackend):
+    """TorchMetrics-based backend implementation."""
+
+    def __init__(self, metrics_info: Union[List[str], Dict[str, Any]]):
+        try:
+            import torch
+            from torchmetrics import Metric
+        except ImportError:
+            raise ImportError(
+                "TorchMetricsBackend requires torch and torchmetrics to be installed."
+            )
+        self.metric_info = metrics_info
+        self.torch = torch
+        self.Metric = Metric
+        self.validate_metrics_info(metrics_info)
+
+    def validate_metrics_info(self, metrics_info):
+        if not isinstance(metrics_info, dict):
+            raise TypeError(
+                "TorchMetricsBackend requires metrics_info as a dict {name: MetricInstance}"
+            )
+        for k, v in metrics_info.items():
+            if not isinstance(k, str):
+                raise TypeError(f"Key '{k}' is not a string")
+            if not isinstance(v, self.Metric):
+                raise TypeError(f"Value for key '{k}' must be a torchmetrics.Metric")
+        return metrics_info
+
+    def compute_metrics(self, metrics_info, metrics_data_dict, *args, **kwargs):
+        out_dict = {}
+        for metric, metric_instance in metrics_info.items():
+            if metric not in metrics_data_dict:
+                raise ValueError(f"Metric '{metric}' not found in provided data.")
+
+            metric_data = metrics_data_dict[metric]
+            sig = inspect.signature(metric_instance.update)
+            expected_args = list(sig.parameters.values())
+
+            if isinstance(metric_data, dict):
+                args = [metric_data[param.name] for param in expected_args]
+            elif isinstance(metric_data, (list, tuple)):
+                args = metric_data
+            else:
+                raise TypeError(f"Unsupported data format for metric '{metric}'")
+
+            if len(expected_args) == 1:
+                metric_instance.update(args)
+            else:
+                metric_instance.update(*args)
+
+            computed_value = metric_instance.compute()
+            if isinstance(computed_value, self.torch.Tensor):
+                computed_value = (
+                    computed_value.item()
+                    if computed_value.numel() == 1
+                    else computed_value.tolist()
+                )
+
+
+            out_dict[metric] = computed_value
+        return out_dict
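
Note (not part of the package diff): the new `MetricsBackend` interface above is what lets `perfcalc` accept custom metrics alongside torchmetrics. The sketch below shows how a plain-Python backend might be written against it; the class name `PlainAccuracyBackend`, the metric name `accuracy`, and the sample data are assumptions for illustration, while `MetricsBackend`, `metric_names`, `calc_metrics`, and `compute_metrics` come from `halib/research/metrics.py` as added above.

```python
# Hypothetical sketch: a custom backend built on the new MetricsBackend interface.
from halib.research.metrics import MetricsBackend

class PlainAccuracyBackend(MetricsBackend):
    """Computes metrics with plain Python, no torch/torchmetrics required."""

    def compute_metrics(self, metrics_info, metrics_data_dict, *args, **kwargs):
        out_dict = {}
        for name in self.metric_names:
            preds, targets = metrics_data_dict[name]  # expect a (preds, targets) pair
            if name == "accuracy":
                correct = sum(p == t for p, t in zip(preds, targets))
                out_dict[name] = correct / len(targets)
            else:
                raise ValueError(f"No rule to compute metric '{name}'")
        return out_dict

# calc_metrics() (inherited from MetricsBackend) validates the keys, then delegates here:
backend = PlainAccuracyBackend(["accuracy"])
print(backend.calc_metrics({"accuracy": ([1, 0, 1, 1], [1, 0, 0, 1])}))
# -> {'accuracy': 0.75}
```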
{halib-0.1.65 → halib-0.1.66}/halib/research/perfcalc.py
@@ -8,62 +8,26 @@ from functools import wraps
 from rich.pretty import pprint
 
 from abc import ABC, abstractmethod
+from collections import OrderedDict
 
 from ..filetype import csvfile
 from ..common import now_str
 from ..research.perftb import PerfTB
-from collections import OrderedDict
-
-# try to import torch, and torchmetrics
-try:
-    import torch
-    import torchmetrics
-    from torchmetrics import Metric
-except ImportError:
-    raise ImportError("Please install torch and torchmetrics to use this module.")
-
-def validate_torch_metrics(fn):
-    @wraps(fn)
-    def wrapper(self, *args, **kwargs):
-        result = fn(self, *args, **kwargs)
+from ..research.metrics import *
 
-        if not isinstance(result, dict):
-            raise TypeError("torch_metrics() must return a dictionary")
+# # try to import torch, and torchmetrics
+# try:
+#     import torch
+#     import torchmetrics
+#     from torchmetrics import Metric
+# except ImportError:
+#     raise ImportError("Please install torch and torchmetrics to use this module.")
 
-        for k, v in result.items():
-            if not isinstance(k, str):
-                raise TypeError(f"Key '{k}' is not a string")
-            if not isinstance(v, Metric):
-                raise TypeError(
-                    f"Value for key '{k}' is not a torchmetrics.Metric (got {type(v).__name__})"
-                )
-
-        return result
-
-    return wrapper
-def valid_custom_fields(fn):
-    @wraps(fn)
-    def wrapper(self, *args, **kwargs):
-        rs = fn(self, *args, **kwargs)
-        if not isinstance(rs, tuple) or len(rs) != 2:
-            raise ValueError("Function must return a tuple (outdict, custom_fields)")
-        outdict, custom_fields = rs
-        if not isinstance(outdict, dict):
-            raise TypeError("Output must be a dictionary")
-        if not isinstance(custom_fields, list):
-            raise TypeError("Custom fields must be a list")
-        for field in custom_fields:
-            if not isinstance(field, str):
-                raise TypeError(f"Custom field '{field}' is not a string")
-        return outdict, custom_fields
-
-    return wrapper
 
 REQUIRED_COLS = ["experiment", "dataset"]
 CSV_FILE_POSTFIX = "__perf"
 
 class PerfCalc(ABC): # Abstract base class for performance calculation
-
     @abstractmethod
     def get_experiment_name(self):
         """
@@ -81,22 +45,17 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
         pass
 
     @abstractmethod
-    def get_metrics_info(self):
+    def get_metric_backend(self) -> MetricsBackend:
         """
        Return a list of metric names to be used for performance calculation OR a dictionaray with keys as metric names and values as metric instances of torchmetrics.Metric. For example: {"accuracy": Accuracy(), "precision": Precision()}
 
        """
        pass
 
-    def calc_exp_outdict_custom_fields(self, outdict, *args, **kwargs):
-        """Can be overridden by the subclass to add custom fields to the output dictionary.
-        ! must return the modified outdict, and a ordered list of custom fields to be added to the output dictionary.
-        """
-        return outdict, []
-
     # ! can be override, but ONLY if torchmetrics are used
     # Prepare the exp data for torch metrics.
-    def prepare_torch_metrics_exp_data(self, metric_names, *args, **kwargs):
+    @abstractmethod
+    def prepare_metrics_data_dict(self, metric_names, *args, **kwargs):
         """
         Prepare the data for metrics.
         This function should be overridden by the subclass if needed.
@@ -105,99 +64,47 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
         """
         pass
 
-    def __validate_metrics_info(self, metrics_info):
-        """
-        Validate the metrics_info to ensure it is a list or a dictionary with valid metric names and instances.
-        """
-        if not isinstance(metrics_info, (list, dict)):
-            raise TypeError(f"Metrics info must be a list or a dictionary, got {type(metrics_info).__name__}")
-
-        if isinstance(metrics_info, dict):
-            for k, v in metrics_info.items():
-                if not isinstance(k, str):
-                    raise TypeError(f"Key '{k}' is not a string")
-                if not isinstance(v, Metric):
-                    raise TypeError(f"Value for key '{k}' is not a torchmetrics.Metric (got {type(v).__name__})")
-        elif isinstance(metrics_info, list):
-            for metric in metrics_info:
-                if not isinstance(metric, str):
-                    raise TypeError(f"Metric '{metric}' is not a string")
-        return metrics_info
-    def __calc_exp_perf_metrics(self, *args, **kwargs):
-        """
-        Calculate the performance metrics for the experiment.
+    def calc_exp_outdict_custom_fields(self, outdict, *args, **kwargs):
+        """Can be overridden by the subclass to add custom fields to the output dictionary.
+        ! must return the modified outdict, and a ordered list of custom fields to be added to the output dictionary.
         """
-        metrics_info = self.__validate_metrics_info(self.get_metrics_info())
-        USED_TORCHMETRICS = isinstance(metrics_info, dict)
-        metric_names = metrics_info if isinstance(metrics_info, list) else list(metrics_info.keys())
-        out_dict = {metric: None for metric in metric_names}
-        out_dict["dataset"] = self.get_dataset_name()
-        out_dict["experiment"] = self.get_experiment_name()
-        out_dict, custom_fields = self.calc_exp_outdict_custom_fields(
-            outdict=out_dict, *args, **kwargs
-        )
-        if USED_TORCHMETRICS:
-            torch_metrics_dict = self.get_metrics_info()
-            all_metric_data = self.prepare_torch_metrics_exp_data(
-                metric_names, *args, **kwargs
-            )
-            metric_col_names = []
-            for metric in metric_names:
-                if metric not in all_metric_data:
-                    raise ValueError(f"Metric '{metric}' not found in provided data.")
-                tmetric = torch_metrics_dict[metric]  # torchmetrics instance
-                metric_data = all_metric_data[metric]  # should be a dict of args/kwargs
-                # Inspect expected parameters for the metric's update() method
-                sig = inspect.signature(tmetric.update)
-                expected_args = list(sig.parameters.values())
-                # Prepare args in correct order
-                if isinstance(metric_data, dict):
-                    # Match dict keys to parameter names
-                    args = [metric_data[param.name] for param in expected_args]
-                elif isinstance(metric_data, (list, tuple)):
-                    args = metric_data
-                else:
-                    raise TypeError(f"Unsupported data format for metric '{metric}'")
+        return outdict, []
 
-                # Call update and compute
-                if len(expected_args) == 1:
-                    tmetric.update(args)  # pass as single argument
-                else:
-                    tmetric.update(*args)  # unpack multiple arguments
-                computed_value = tmetric.compute()
-                # ensure the computed value converted to a scala value or list array
-                if isinstance(computed_value, torch.Tensor):
-                    if computed_value.numel() == 1:
-                        computed_value = computed_value.item()
-                    else:
-                        computed_value = computed_value.tolist()
-                col_name = f"metric_{metric}" if "metric_" not in metric else metric
-                metric_col_names.append(col_name)
-                out_dict[col_name] = computed_value
-        else:
-            # If torchmetrics are not used, calculate metrics using the custom method
-            metric_rs_dict = self.calc_exp_perf_metrics(
-                metric_names, *args, **kwargs)
-            for metric in metric_names:
-                if metric not in metric_rs_dict:
-                    raise ValueError(f"Metric '{metric}' not found in provided data.")
-                col_name = f"metric_{metric}" if "metric_" not in metric else metric
-                out_dict[col_name] = metric_rs_dict[metric]
-            metric_col_names = [f"metric_{metric}" for metric in metric_names]
-        ordered_cols = REQUIRED_COLS + custom_fields + metric_col_names
-        # create a new ordered dictionary with the correct order
-        out_dict = OrderedDict((col, out_dict[col]) for col in ordered_cols if col in out_dict)
-        return out_dict
+    def __valid_calc_custom_fields(self, fun_results):
+        if not isinstance(fun_results, tuple) or len(fun_results) != 2:
+            raise ValueError("Function must return a tuple (outdict, custom_fields)")
+        outdict, custom_fields = fun_results
+        if not isinstance(outdict, dict):
+            raise TypeError("Output must be a dictionary")
+        if not isinstance(custom_fields, list):
+            raise TypeError("Custom fields must be a list")
+        for field in custom_fields:
+            if not isinstance(field, str):
+                raise TypeError(f"Custom field '{field}' is not a string")
+        return outdict, custom_fields
 
     # ! only need to override this method if torchmetrics are not used
     def calc_exp_perf_metrics(self, metric_names, *args, **kwargs):
-        """
-        Calculate the performance metrics for the experiment, but not using torchmetrics.
-        This function should be overridden by the subclass if needed.
-        Must return a dictionary with keys as metric names and values as the calculated metrics.
-        """
-        raise NotImplementedError("calc_exp_perf_metrics() must be overridden by the subclass if torchmetrics are not used.")
-
+        metrics_backend = self.get_metric_backend()
+        out_dict = {"dataset": self.get_dataset_name(), "experiment": self.get_experiment_name()}
+        out_dict, custom_fields = self.__valid_calc_custom_fields(self.calc_exp_outdict_custom_fields(
+            outdict=out_dict, *args, **kwargs
+        ))
+        metrics_data_dict = self.prepare_metrics_data_dict(
+            metric_names, *args, **kwargs
+        )
+        metric_results = metrics_backend.calc_metrics(
+            metrics_data_dict=metrics_data_dict, *args, **kwargs
+        )
+        metric_results_prefix = {
+            f"metric_{k}": v for k, v in metric_results.items()
+        }
+        out_dict.update(metric_results_prefix)
+        ordered_cols = REQUIRED_COLS + custom_fields + list(metric_results_prefix.keys())
+        out_dict = OrderedDict(
+            (col, out_dict[col]) for col in ordered_cols if col in out_dict
+        )
+        return out_dict
 
     #! custom kwargs:
     #! outfile - if provided, will save the output to a CSV file with the given path
@@ -210,9 +117,8 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
         This function should be overridden by the subclass if needed.
         Must return a dictionary with keys as metric names and values as the calculated metrics.
         """
-        out_dict = self.__calc_exp_perf_metrics(*args, **kwargs)
-        # pprint(f"Output Dictionary: {out_dict}")
-        # check if any kwargs named "outfile"
+        metric_names = self.get_metric_backend().metric_names
+        out_dict = self.calc_exp_perf_metrics(metric_names=metric_names, *args, **kwargs)
         csv_outfile = kwargs.get("outfile", None)
         if csv_outfile is not None:
            filePathNoExt, _ = os.path.splitext(csv_outfile)
@@ -284,6 +190,9 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
         assert len(metric_cols) > 0, "No metric columns found in the DataFrame. Ensure that the CSV files contain metric columns starting with 'metric_'."
         final_cols = REQUIRED_COLS + metric_cols
         df = df[final_cols]
+        # !hahv debug
+        pprint('------ Final DataFrame Columns ------')
+        csvfile.fn_display_df(df)
         # ! validate all rows in df before returning
         # make sure all rows will have at least values for REQUIRED_COLS and at least one metric column
         for index, row in df.iterrows():
@@ -383,4 +292,4 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
         all_exp_perf_df = get_df_for_all_exp_perf(csv_perf_files, csv_sep=csv_sep)
         csvfile.fn_display_df(all_exp_perf_df)
         perf_tb = mk_perftb_report(all_exp_perf_df)
-        return perf_tb
+        return perf_tb
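
Note (not from the release): the refactored `PerfCalc` flow above can be exercised roughly as follows. This is a hedged sketch; `DemoPerfCalc`, the experiment/dataset names, and the toy tensors are invented, and it assumes the abstract methods visible in this diff (`get_experiment_name`, `get_dataset_name`, `get_metric_backend`, `prepare_metrics_data_dict`) are the only ones a subclass must implement.

```python
# Hypothetical usage sketch for the refactored PerfCalc + TorchMetricsBackend.
import torch
from torchmetrics.classification import BinaryAccuracy

from halib.research.metrics import MetricsBackend, TorchMetricsBackend
from halib.research.perfcalc import PerfCalc

class DemoPerfCalc(PerfCalc):
    def get_experiment_name(self):
        return "exp_demo"

    def get_dataset_name(self):
        return "toy_dataset"

    def get_metric_backend(self) -> MetricsBackend:
        # TorchMetricsBackend takes {metric name: torchmetrics.Metric};
        # a custom MetricsBackend subclass (see the earlier sketch) plugs in the same way.
        return TorchMetricsBackend({"accuracy": BinaryAccuracy()})

    def prepare_metrics_data_dict(self, metric_names, *args, **kwargs):
        preds = torch.tensor([1, 0, 1, 1])
        target = torch.tensor([1, 0, 0, 1])
        # one entry per metric name; a (preds, target) tuple matches update()'s signature
        return {"accuracy": (preds, target)}

calc = DemoPerfCalc()
metric_names = calc.get_metric_backend().metric_names
row = calc.calc_exp_perf_metrics(metric_names=metric_names)
# -> OrderedDict([('experiment', 'exp_demo'), ('dataset', 'toy_dataset'), ('metric_accuracy', 0.75)])
```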
{halib-0.1.65 → halib-0.1.66}/halib.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: halib
-Version: 0.1.65
+Version: 0.1.66
 Summary: Small library for common tasks
 Author: Hoang Van Ha
 Author-email: hoangvanhauit@gmail.com
@@ -52,7 +52,7 @@ Dynamic: summary
 
 Helper package for coding and automation
 
-**Version 0.1.65**
+**Version 0.1.66**
 
 + now use `uv` for venv management
 + `research/perfcalc`: support both torchmetrics and custom metrics for performance calculation
{halib-0.1.65 → halib-0.1.66}/halib.egg-info/SOURCES.txt
@@ -27,6 +27,7 @@ halib/online/gdrive_test.py
 halib/online/projectmake.py
 halib/research/__init__.py
 halib/research/dataset.py
+halib/research/metrics.py
 halib/research/perfcalc.py
 halib/research/perftb.py
 halib/research/plot.py
{halib-0.1.65 → halib-0.1.66}/setup.py
@@ -8,7 +8,7 @@ with open("requirements.txt") as f:
 
 setuptools.setup(
     name="halib",
-    version="0.1.65",
+    version="0.1.66",
     author="Hoang Van Ha",
     author_email="hoangvanhauit@gmail.com",
     description="Small library for common tasks",