birdnet-analyzer 2.0.0-py3-none-any.whl → 2.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- birdnet_analyzer/__init__.py +9 -8
- birdnet_analyzer/analyze/__init__.py +5 -5
- birdnet_analyzer/analyze/__main__.py +3 -4
- birdnet_analyzer/analyze/cli.py +25 -25
- birdnet_analyzer/analyze/core.py +241 -245
- birdnet_analyzer/analyze/utils.py +692 -701
- birdnet_analyzer/audio.py +368 -372
- birdnet_analyzer/cli.py +709 -707
- birdnet_analyzer/config.py +242 -242
- birdnet_analyzer/eBird_taxonomy_codes_2021E.json +25279 -25279
- birdnet_analyzer/embeddings/__init__.py +3 -4
- birdnet_analyzer/embeddings/__main__.py +3 -3
- birdnet_analyzer/embeddings/cli.py +12 -13
- birdnet_analyzer/embeddings/core.py +69 -70
- birdnet_analyzer/embeddings/utils.py +179 -193
- birdnet_analyzer/evaluation/__init__.py +196 -195
- birdnet_analyzer/evaluation/__main__.py +3 -3
- birdnet_analyzer/evaluation/assessment/__init__.py +0 -0
- birdnet_analyzer/evaluation/assessment/metrics.py +388 -0
- birdnet_analyzer/evaluation/assessment/performance_assessor.py +409 -0
- birdnet_analyzer/evaluation/assessment/plotting.py +379 -0
- birdnet_analyzer/evaluation/preprocessing/__init__.py +0 -0
- birdnet_analyzer/evaluation/preprocessing/data_processor.py +631 -0
- birdnet_analyzer/evaluation/preprocessing/utils.py +98 -0
- birdnet_analyzer/gui/__init__.py +19 -23
- birdnet_analyzer/gui/__main__.py +3 -3
- birdnet_analyzer/gui/analysis.py +175 -174
- birdnet_analyzer/gui/assets/arrow_down.svg +4 -4
- birdnet_analyzer/gui/assets/arrow_left.svg +4 -4
- birdnet_analyzer/gui/assets/arrow_right.svg +4 -4
- birdnet_analyzer/gui/assets/arrow_up.svg +4 -4
- birdnet_analyzer/gui/assets/gui.css +28 -28
- birdnet_analyzer/gui/assets/gui.js +93 -93
- birdnet_analyzer/gui/embeddings.py +619 -620
- birdnet_analyzer/gui/evaluation.py +795 -813
- birdnet_analyzer/gui/localization.py +75 -68
- birdnet_analyzer/gui/multi_file.py +245 -246
- birdnet_analyzer/gui/review.py +519 -527
- birdnet_analyzer/gui/segments.py +191 -191
- birdnet_analyzer/gui/settings.py +128 -129
- birdnet_analyzer/gui/single_file.py +267 -269
- birdnet_analyzer/gui/species.py +95 -95
- birdnet_analyzer/gui/train.py +696 -698
- birdnet_analyzer/gui/utils.py +810 -808
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_af.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ar.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_bg.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ca.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_cs.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_da.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_de.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_el.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_en_uk.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_es.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_fi.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_fr.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_he.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_hr.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_hu.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_in.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_is.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_it.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ja.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ko.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_lt.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ml.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_nl.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_no.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_pl.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_pt_BR.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_pt_PT.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ro.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ru.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sk.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sl.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sr.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sv.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_th.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_tr.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_uk.txt +6522 -6522
- birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_zh.txt +6522 -6522
- birdnet_analyzer/lang/de.json +334 -334
- birdnet_analyzer/lang/en.json +334 -334
- birdnet_analyzer/lang/fi.json +334 -334
- birdnet_analyzer/lang/fr.json +334 -334
- birdnet_analyzer/lang/id.json +334 -334
- birdnet_analyzer/lang/pt-br.json +334 -334
- birdnet_analyzer/lang/ru.json +334 -334
- birdnet_analyzer/lang/se.json +334 -334
- birdnet_analyzer/lang/tlh.json +334 -334
- birdnet_analyzer/lang/zh_TW.json +334 -334
- birdnet_analyzer/model.py +1212 -1243
- birdnet_analyzer/playground.py +5 -0
- birdnet_analyzer/search/__init__.py +3 -3
- birdnet_analyzer/search/__main__.py +3 -3
- birdnet_analyzer/search/cli.py +11 -12
- birdnet_analyzer/search/core.py +78 -78
- birdnet_analyzer/search/utils.py +107 -111
- birdnet_analyzer/segments/__init__.py +3 -3
- birdnet_analyzer/segments/__main__.py +3 -3
- birdnet_analyzer/segments/cli.py +13 -14
- birdnet_analyzer/segments/core.py +81 -78
- birdnet_analyzer/segments/utils.py +383 -394
- birdnet_analyzer/species/__init__.py +3 -3
- birdnet_analyzer/species/__main__.py +3 -3
- birdnet_analyzer/species/cli.py +13 -14
- birdnet_analyzer/species/core.py +35 -35
- birdnet_analyzer/species/utils.py +74 -75
- birdnet_analyzer/train/__init__.py +3 -3
- birdnet_analyzer/train/__main__.py +3 -3
- birdnet_analyzer/train/cli.py +13 -14
- birdnet_analyzer/train/core.py +113 -113
- birdnet_analyzer/train/utils.py +877 -847
- birdnet_analyzer/translate.py +133 -104
- birdnet_analyzer/utils.py +426 -419
- {birdnet_analyzer-2.0.0.dist-info → birdnet_analyzer-2.0.1.dist-info}/METADATA +137 -129
- birdnet_analyzer-2.0.1.dist-info/RECORD +125 -0
- {birdnet_analyzer-2.0.0.dist-info → birdnet_analyzer-2.0.1.dist-info}/WHEEL +1 -1
- {birdnet_analyzer-2.0.0.dist-info → birdnet_analyzer-2.0.1.dist-info}/licenses/LICENSE +18 -18
- birdnet_analyzer-2.0.0.dist-info/RECORD +0 -117
- {birdnet_analyzer-2.0.0.dist-info → birdnet_analyzer-2.0.1.dist-info}/entry_points.txt +0 -0
- {birdnet_analyzer-2.0.0.dist-info → birdnet_analyzer-2.0.1.dist-info}/top_level.txt +0 -0
birdnet_analyzer/evaluation/__init__.py
@@ -1,195 +1,196 @@
[removed lines 1-195 are not recoverable from this rendering; the 196 added lines follow]
+"""
+Core script for assessing performance of prediction models against annotated data.
+
+This script uses the `DataProcessor` and `PerformanceAssessor` classes to process prediction and
+annotation data, compute metrics, and optionally generate plots. It supports flexible configurations
+for columns, class mappings, and filtering based on selected classes or recordings.
+"""
+
+import argparse
+import json
+import os
+
+from birdnet_analyzer.evaluation.assessment.performance_assessor import (
+    PerformanceAssessor,
+)
+from birdnet_analyzer.evaluation.preprocessing.data_processor import DataProcessor
+
+
+def process_data(
+    annotation_path: str,
+    prediction_path: str,
+    mapping_path: str | None = None,
+    sample_duration: float = 3.0,
+    min_overlap: float = 0.5,
+    recording_duration: float | None = None,
+    columns_annotations: dict[str, str] | None = None,
+    columns_predictions: dict[str, str] | None = None,
+    selected_classes: list[str] | None = None,
+    selected_recordings: list[str] | None = None,
+    metrics_list: tuple[str, ...] = ("accuracy", "precision", "recall"),
+    threshold: float = 0.1,
+    class_wise: bool = False,
+):
+    """
+    Processes data, computes metrics, and prepares the performance assessment pipeline.
+
+    Args:
+        annotation_path (str): Path to the annotation file or folder.
+        prediction_path (str): Path to the prediction file or folder.
+        mapping_path (Optional[str]): Path to the class mapping JSON file, if applicable.
+        sample_duration (float): Duration of each sample interval in seconds.
+        min_overlap (float): Minimum overlap required between predictions and annotations.
+        recording_duration (Optional[float]): Total duration of the recordings, if known.
+        columns_annotations (Optional[Dict[str, str]]): Custom column mappings for annotations.
+        columns_predictions (Optional[Dict[str, str]]): Custom column mappings for predictions.
+        selected_classes (Optional[List[str]]): List of classes to include in the analysis.
+        selected_recordings (Optional[List[str]]): List of recordings to include in the analysis.
+        metrics_list (Tuple[str, ...]): Metrics to compute for performance assessment.
+        threshold (float): Confidence threshold for predictions.
+        class_wise (bool): Whether to calculate metrics on a per-class basis.
+
+    Returns:
+        Tuple: Metrics DataFrame, `PerformanceAssessor` object, predictions tensor, labels tensor.
+    """
+    # Load class mapping if provided
+    if mapping_path:
+        with open(mapping_path) as f:
+            class_mapping = json.load(f)
+    else:
+        class_mapping = None
+
+    # Determine directory and file paths for annotations and predictions
+    annotation_dir, annotation_file = (
+        (os.path.dirname(annotation_path), os.path.basename(annotation_path))
+        if os.path.isfile(annotation_path)
+        else (annotation_path, None)
+    )
+    prediction_dir, prediction_file = (
+        (os.path.dirname(prediction_path), os.path.basename(prediction_path))
+        if os.path.isfile(prediction_path)
+        else (prediction_path, None)
+    )
+
+    # Initialize the DataProcessor to handle and prepare data
+    processor = DataProcessor(
+        prediction_directory_path=prediction_dir,
+        prediction_file_name=prediction_file,
+        annotation_directory_path=annotation_dir,
+        annotation_file_name=annotation_file,
+        class_mapping=class_mapping,
+        sample_duration=sample_duration,
+        min_overlap=min_overlap,
+        columns_predictions=columns_predictions,
+        columns_annotations=columns_annotations,
+        recording_duration=recording_duration,
+    )
+
+    # Get the available classes and recordings
+    available_classes = processor.classes
+    available_recordings = processor.samples_df["filename"].unique().tolist()
+
+    # Default to all classes or recordings if none are specified
+    if selected_classes is None:
+        selected_classes = available_classes
+    if selected_recordings is None:
+        selected_recordings = available_recordings
+
+    # Retrieve predictions and labels tensors for the selected classes and recordings
+    predictions, labels, classes = processor.get_filtered_tensors(selected_classes, selected_recordings)
+
+    num_classes = len(classes)
+    task = "binary" if num_classes == 1 else "multilabel"
+
+    # Initialize the PerformanceAssessor for computing metrics
+    pa = PerformanceAssessor(
+        num_classes=num_classes,
+        threshold=threshold,
+        classes=classes,
+        task=task,
+        metrics_list=metrics_list,
+    )
+
+    # Compute performance metrics
+    metrics_df = pa.calculate_metrics(predictions, labels, per_class_metrics=class_wise)
+
+    return metrics_df, pa, predictions, labels
+
+
+def main():
+    """
+    Entry point for the script. Parses command-line arguments and orchestrates the performance assessment pipeline.
+    """
+    # Set up argument parsing
+    parser = argparse.ArgumentParser(description="Performance Assessor Core Script")
+    parser.add_argument("--annotation_path", required=True, help="Path to annotation file or folder")
+    parser.add_argument("--prediction_path", required=True, help="Path to prediction file or folder")
+    parser.add_argument("--mapping_path", help="Path to class mapping JSON file (optional)")
+    parser.add_argument("--sample_duration", type=float, default=3.0, help="Sample duration in seconds")
+    parser.add_argument("--min_overlap", type=float, default=0.5, help="Minimum overlap in seconds")
+    parser.add_argument("--recording_duration", type=float, help="Recording duration in seconds")
+    parser.add_argument("--columns_annotations", type=json.loads, help="JSON string for columns_annotations")
+    parser.add_argument("--columns_predictions", type=json.loads, help="JSON string for columns_predictions")
+    parser.add_argument("--selected_classes", nargs="+", help="List of selected classes")
+    parser.add_argument("--selected_recordings", nargs="+", help="List of selected recordings")
+    parser.add_argument("--metrics", nargs="+", default=["accuracy", "precision", "recall"], help="List of metrics")
+    parser.add_argument("--threshold", type=float, default=0.1, help="Threshold value (0-1)")
+    parser.add_argument("--class_wise", action="store_true", help="Calculate class-wise metrics")
+    parser.add_argument("--plot_metrics", action="store_true", help="Plot metrics")
+    parser.add_argument("--plot_confusion_matrix", action="store_true", help="Plot confusion matrix")
+    parser.add_argument("--plot_metrics_all_thresholds", action="store_true", help="Plot metrics for all thresholds")
+    parser.add_argument("--output_dir", help="Directory to save plots")
+
+    # Parse arguments
+    args = parser.parse_args()
+
+    # Process data and compute metrics
+    metrics_df, pa, predictions, labels = process_data(
+        annotation_path=args.annotation_path,
+        prediction_path=args.prediction_path,
+        mapping_path=args.mapping_path,
+        sample_duration=args.sample_duration,
+        min_overlap=args.min_overlap,
+        recording_duration=args.recording_duration,
+        columns_annotations=args.columns_annotations,
+        columns_predictions=args.columns_predictions,
+        selected_classes=args.selected_classes,
+        selected_recordings=args.selected_recordings,
+        metrics_list=args.metrics,
+        threshold=args.threshold,
+        class_wise=args.class_wise,
+    )
+
+    # Display the computed metrics
+    print(metrics_df)
+
+    # Create output directory if needed
+    if args.output_dir and not os.path.exists(args.output_dir):
+        os.makedirs(args.output_dir)
+
+    # Generate plots if specified
+    if args.plot_metrics:
+        pa.plot_metrics(predictions, labels, per_class_metrics=args.class_wise)
+        if args.output_dir:
+            import matplotlib.pyplot as plt
+
+            plt.savefig(os.path.join(args.output_dir, "metrics_plot.png"))
+        else:
+            plt.show()
+
+    if args.plot_confusion_matrix:
+        pa.plot_confusion_matrix(predictions, labels)
+        if args.output_dir:
+            import matplotlib.pyplot as plt
+
+            plt.savefig(os.path.join(args.output_dir, "confusion_matrix.png"))
+        else:
+            plt.show()
+
+    if args.plot_metrics_all_thresholds:
+        pa.plot_metrics_all_thresholds(predictions, labels, per_class_metrics=args.class_wise)
+        if args.output_dir:
+            import matplotlib.pyplot as plt
+
+            plt.savefig(os.path.join(args.output_dir, "metrics_all_thresholds.png"))
+        else:
+            plt.show()
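For orientation, the rewritten `process_data` helper shown above can also be called directly from Python. The snippet below is a minimal, hypothetical sketch based only on the signature in the diff; the annotation and prediction paths are placeholders, and the metric names are taken from the function's own defaults.

# Minimal sketch (hypothetical paths); mirrors the process_data signature shown above.
from birdnet_analyzer.evaluation import process_data

metrics_df, pa, predictions, labels = process_data(
    annotation_path="annotations/",    # placeholder: folder of annotation tables
    prediction_path="predictions/",    # placeholder: folder of prediction tables
    sample_duration=3.0,               # seconds per evaluation interval
    min_overlap=0.5,                   # overlap required to count a match
    metrics_list=("precision", "recall"),
    threshold=0.1,
    class_wise=True,
)
print(metrics_df)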
birdnet_analyzer/evaluation/__main__.py
@@ -1,3 +1,3 @@
-from birdnet_analyzer.evaluation import main
-
-main()
+from birdnet_analyzer.evaluation import main
+
+main()
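Because `__main__.py` simply calls `main()`, the evaluation CLI can be run with `python -m birdnet_analyzer.evaluation`. The sketch below drives that same entry point in-process; it is illustrative only, all paths and values are placeholders, and the flags are the ones defined in the argparse setup shown in the diff above.

# Illustrative sketch: invoke the evaluation CLI in-process (placeholder paths).
import sys

from birdnet_analyzer.evaluation import main

sys.argv = [
    "birdnet_analyzer.evaluation",     # program name; argparse ignores the value
    "--annotation_path", "annotations/",
    "--prediction_path", "predictions/",
    "--threshold", "0.25",
    "--class_wise",
    "--plot_metrics",
    "--output_dir", "eval_plots/",
]
main()  # parses sys.argv, prints the metrics table, saves metrics_plot.png to eval_plots/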