birdnet-analyzer 2.0.1__py3-none-any.whl → 2.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. birdnet_analyzer/__init__.py +9 -9
  2. birdnet_analyzer/analyze/__init__.py +19 -5
  3. birdnet_analyzer/analyze/__main__.py +3 -3
  4. birdnet_analyzer/analyze/cli.py +30 -25
  5. birdnet_analyzer/analyze/core.py +268 -241
  6. birdnet_analyzer/analyze/utils.py +700 -692
  7. birdnet_analyzer/audio.py +368 -368
  8. birdnet_analyzer/cli.py +732 -709
  9. birdnet_analyzer/config.py +243 -242
  10. birdnet_analyzer/eBird_taxonomy_codes_2024E.json +13046 -0
  11. birdnet_analyzer/embeddings/__init__.py +3 -3
  12. birdnet_analyzer/embeddings/__main__.py +3 -3
  13. birdnet_analyzer/embeddings/cli.py +12 -12
  14. birdnet_analyzer/embeddings/core.py +70 -69
  15. birdnet_analyzer/embeddings/utils.py +173 -179
  16. birdnet_analyzer/evaluation/__init__.py +189 -196
  17. birdnet_analyzer/evaluation/__main__.py +3 -3
  18. birdnet_analyzer/evaluation/assessment/metrics.py +388 -388
  19. birdnet_analyzer/evaluation/assessment/performance_assessor.py +364 -409
  20. birdnet_analyzer/evaluation/assessment/plotting.py +378 -379
  21. birdnet_analyzer/evaluation/preprocessing/data_processor.py +631 -631
  22. birdnet_analyzer/evaluation/preprocessing/utils.py +98 -98
  23. birdnet_analyzer/gui/__init__.py +19 -19
  24. birdnet_analyzer/gui/__main__.py +3 -3
  25. birdnet_analyzer/gui/analysis.py +179 -175
  26. birdnet_analyzer/gui/assets/arrow_down.svg +4 -4
  27. birdnet_analyzer/gui/assets/arrow_left.svg +4 -4
  28. birdnet_analyzer/gui/assets/arrow_right.svg +4 -4
  29. birdnet_analyzer/gui/assets/arrow_up.svg +4 -4
  30. birdnet_analyzer/gui/assets/gui.css +36 -28
  31. birdnet_analyzer/gui/assets/gui.js +89 -93
  32. birdnet_analyzer/gui/embeddings.py +638 -619
  33. birdnet_analyzer/gui/evaluation.py +801 -795
  34. birdnet_analyzer/gui/localization.py +75 -75
  35. birdnet_analyzer/gui/multi_file.py +265 -245
  36. birdnet_analyzer/gui/review.py +472 -519
  37. birdnet_analyzer/gui/segments.py +191 -191
  38. birdnet_analyzer/gui/settings.py +149 -128
  39. birdnet_analyzer/gui/single_file.py +264 -267
  40. birdnet_analyzer/gui/species.py +95 -95
  41. birdnet_analyzer/gui/train.py +687 -696
  42. birdnet_analyzer/gui/utils.py +803 -810
  43. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_af.txt +6522 -6522
  44. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ar.txt +6522 -6522
  45. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_bg.txt +6522 -6522
  46. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ca.txt +6522 -6522
  47. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_cs.txt +6522 -6522
  48. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_da.txt +6522 -6522
  49. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_de.txt +6522 -6522
  50. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_el.txt +6522 -6522
  51. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_en_uk.txt +6522 -6522
  52. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_es.txt +6522 -6522
  53. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_fi.txt +6522 -6522
  54. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_fr.txt +6522 -6522
  55. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_he.txt +6522 -6522
  56. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_hr.txt +6522 -6522
  57. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_hu.txt +6522 -6522
  58. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_in.txt +6522 -6522
  59. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_is.txt +6522 -6522
  60. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_it.txt +6522 -6522
  61. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ja.txt +6522 -6522
  62. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ko.txt +6522 -6522
  63. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_lt.txt +6522 -6522
  64. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ml.txt +6522 -6522
  65. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_nl.txt +6522 -6522
  66. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_no.txt +6522 -6522
  67. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_pl.txt +6522 -6522
  68. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_pt_BR.txt +6522 -6522
  69. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_pt_PT.txt +6522 -6522
  70. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ro.txt +6522 -6522
  71. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ru.txt +6522 -6522
  72. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sk.txt +6522 -6522
  73. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sl.txt +6522 -6522
  74. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sr.txt +6522 -6522
  75. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sv.txt +6522 -6522
  76. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_th.txt +6522 -6522
  77. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_tr.txt +6522 -6522
  78. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_uk.txt +6522 -6522
  79. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_zh.txt +6522 -6522
  80. birdnet_analyzer/lang/de.json +342 -334
  81. birdnet_analyzer/lang/en.json +342 -334
  82. birdnet_analyzer/lang/fi.json +342 -334
  83. birdnet_analyzer/lang/fr.json +342 -334
  84. birdnet_analyzer/lang/id.json +342 -334
  85. birdnet_analyzer/lang/pt-br.json +342 -334
  86. birdnet_analyzer/lang/ru.json +342 -334
  87. birdnet_analyzer/lang/se.json +342 -334
  88. birdnet_analyzer/lang/tlh.json +342 -334
  89. birdnet_analyzer/lang/zh_TW.json +342 -334
  90. birdnet_analyzer/model.py +1213 -1212
  91. birdnet_analyzer/search/__init__.py +3 -3
  92. birdnet_analyzer/search/__main__.py +3 -3
  93. birdnet_analyzer/search/cli.py +11 -11
  94. birdnet_analyzer/search/core.py +78 -78
  95. birdnet_analyzer/search/utils.py +104 -107
  96. birdnet_analyzer/segments/__init__.py +3 -3
  97. birdnet_analyzer/segments/__main__.py +3 -3
  98. birdnet_analyzer/segments/cli.py +13 -13
  99. birdnet_analyzer/segments/core.py +81 -81
  100. birdnet_analyzer/segments/utils.py +383 -383
  101. birdnet_analyzer/species/__init__.py +3 -3
  102. birdnet_analyzer/species/__main__.py +3 -3
  103. birdnet_analyzer/species/cli.py +13 -13
  104. birdnet_analyzer/species/core.py +35 -35
  105. birdnet_analyzer/species/utils.py +73 -74
  106. birdnet_analyzer/train/__init__.py +3 -3
  107. birdnet_analyzer/train/__main__.py +3 -3
  108. birdnet_analyzer/train/cli.py +13 -13
  109. birdnet_analyzer/train/core.py +113 -113
  110. birdnet_analyzer/train/utils.py +878 -877
  111. birdnet_analyzer/translate.py +132 -133
  112. birdnet_analyzer/utils.py +425 -426
  113. {birdnet_analyzer-2.0.1.dist-info → birdnet_analyzer-2.1.1.dist-info}/METADATA +147 -137
  114. birdnet_analyzer-2.1.1.dist-info/RECORD +124 -0
  115. {birdnet_analyzer-2.0.1.dist-info → birdnet_analyzer-2.1.1.dist-info}/WHEEL +1 -1
  116. {birdnet_analyzer-2.0.1.dist-info → birdnet_analyzer-2.1.1.dist-info}/licenses/LICENSE +18 -18
  117. birdnet_analyzer/eBird_taxonomy_codes_2021E.json +0 -25280
  118. birdnet_analyzer/playground.py +0 -5
  119. birdnet_analyzer-2.0.1.dist-info/RECORD +0 -125
  120. {birdnet_analyzer-2.0.1.dist-info → birdnet_analyzer-2.1.1.dist-info}/entry_points.txt +0 -0
  121. {birdnet_analyzer-2.0.1.dist-info → birdnet_analyzer-2.1.1.dist-info}/top_level.txt +0 -0
birdnet_analyzer/evaluation/__init__.py
@@ -1,196 +1,189 @@
- """
- Core script for assessing performance of prediction models against annotated data.
-
- This script uses the `DataProcessor` and `PerformanceAssessor` classes to process prediction and
- annotation data, compute metrics, and optionally generate plots. It supports flexible configurations
- for columns, class mappings, and filtering based on selected classes or recordings.
- """
-
- import argparse
- import json
- import os
-
- from birdnet_analyzer.evaluation.assessment.performance_assessor import (
-     PerformanceAssessor,
- )
- from birdnet_analyzer.evaluation.preprocessing.data_processor import DataProcessor
-
-
- def process_data(
-     annotation_path: str,
-     prediction_path: str,
-     mapping_path: str | None = None,
-     sample_duration: float = 3.0,
-     min_overlap: float = 0.5,
-     recording_duration: float | None = None,
-     columns_annotations: dict[str, str] | None = None,
-     columns_predictions: dict[str, str] | None = None,
-     selected_classes: list[str] | None = None,
-     selected_recordings: list[str] | None = None,
-     metrics_list: tuple[str, ...] = ("accuracy", "precision", "recall"),
-     threshold: float = 0.1,
-     class_wise: bool = False,
- ):
-     """
-     Processes data, computes metrics, and prepares the performance assessment pipeline.
-
-     Args:
-         annotation_path (str): Path to the annotation file or folder.
-         prediction_path (str): Path to the prediction file or folder.
-         mapping_path (Optional[str]): Path to the class mapping JSON file, if applicable.
-         sample_duration (float): Duration of each sample interval in seconds.
-         min_overlap (float): Minimum overlap required between predictions and annotations.
-         recording_duration (Optional[float]): Total duration of the recordings, if known.
-         columns_annotations (Optional[Dict[str, str]]): Custom column mappings for annotations.
-         columns_predictions (Optional[Dict[str, str]]): Custom column mappings for predictions.
-         selected_classes (Optional[List[str]]): List of classes to include in the analysis.
-         selected_recordings (Optional[List[str]]): List of recordings to include in the analysis.
-         metrics_list (Tuple[str, ...]): Metrics to compute for performance assessment.
-         threshold (float): Confidence threshold for predictions.
-         class_wise (bool): Whether to calculate metrics on a per-class basis.
-
-     Returns:
-         Tuple: Metrics DataFrame, `PerformanceAssessor` object, predictions tensor, labels tensor.
-     """
-     # Load class mapping if provided
-     if mapping_path:
-         with open(mapping_path) as f:
-             class_mapping = json.load(f)
-     else:
-         class_mapping = None
-
-     # Determine directory and file paths for annotations and predictions
-     annotation_dir, annotation_file = (
-         (os.path.dirname(annotation_path), os.path.basename(annotation_path))
-         if os.path.isfile(annotation_path)
-         else (annotation_path, None)
-     )
-     prediction_dir, prediction_file = (
-         (os.path.dirname(prediction_path), os.path.basename(prediction_path))
-         if os.path.isfile(prediction_path)
-         else (prediction_path, None)
-     )
-
-     # Initialize the DataProcessor to handle and prepare data
-     processor = DataProcessor(
-         prediction_directory_path=prediction_dir,
-         prediction_file_name=prediction_file,
-         annotation_directory_path=annotation_dir,
-         annotation_file_name=annotation_file,
-         class_mapping=class_mapping,
-         sample_duration=sample_duration,
-         min_overlap=min_overlap,
-         columns_predictions=columns_predictions,
-         columns_annotations=columns_annotations,
-         recording_duration=recording_duration,
-     )
-
-     # Get the available classes and recordings
-     available_classes = processor.classes
-     available_recordings = processor.samples_df["filename"].unique().tolist()
-
-     # Default to all classes or recordings if none are specified
-     if selected_classes is None:
-         selected_classes = available_classes
-     if selected_recordings is None:
-         selected_recordings = available_recordings
-
-     # Retrieve predictions and labels tensors for the selected classes and recordings
-     predictions, labels, classes = processor.get_filtered_tensors(selected_classes, selected_recordings)
-
-     num_classes = len(classes)
-     task = "binary" if num_classes == 1 else "multilabel"
-
-     # Initialize the PerformanceAssessor for computing metrics
-     pa = PerformanceAssessor(
-         num_classes=num_classes,
-         threshold=threshold,
-         classes=classes,
-         task=task,
-         metrics_list=metrics_list,
-     )
-
-     # Compute performance metrics
-     metrics_df = pa.calculate_metrics(predictions, labels, per_class_metrics=class_wise)
-
-     return metrics_df, pa, predictions, labels
-
-
- def main():
-     """
-     Entry point for the script. Parses command-line arguments and orchestrates the performance assessment pipeline.
-     """
-     # Set up argument parsing
-     parser = argparse.ArgumentParser(description="Performance Assessor Core Script")
-     parser.add_argument("--annotation_path", required=True, help="Path to annotation file or folder")
-     parser.add_argument("--prediction_path", required=True, help="Path to prediction file or folder")
-     parser.add_argument("--mapping_path", help="Path to class mapping JSON file (optional)")
-     parser.add_argument("--sample_duration", type=float, default=3.0, help="Sample duration in seconds")
-     parser.add_argument("--min_overlap", type=float, default=0.5, help="Minimum overlap in seconds")
-     parser.add_argument("--recording_duration", type=float, help="Recording duration in seconds")
-     parser.add_argument("--columns_annotations", type=json.loads, help="JSON string for columns_annotations")
-     parser.add_argument("--columns_predictions", type=json.loads, help="JSON string for columns_predictions")
-     parser.add_argument("--selected_classes", nargs="+", help="List of selected classes")
-     parser.add_argument("--selected_recordings", nargs="+", help="List of selected recordings")
-     parser.add_argument("--metrics", nargs="+", default=["accuracy", "precision", "recall"], help="List of metrics")
-     parser.add_argument("--threshold", type=float, default=0.1, help="Threshold value (0-1)")
-     parser.add_argument("--class_wise", action="store_true", help="Calculate class-wise metrics")
-     parser.add_argument("--plot_metrics", action="store_true", help="Plot metrics")
-     parser.add_argument("--plot_confusion_matrix", action="store_true", help="Plot confusion matrix")
-     parser.add_argument("--plot_metrics_all_thresholds", action="store_true", help="Plot metrics for all thresholds")
-     parser.add_argument("--output_dir", help="Directory to save plots")
-
-     # Parse arguments
-     args = parser.parse_args()
-
-     # Process data and compute metrics
-     metrics_df, pa, predictions, labels = process_data(
-         annotation_path=args.annotation_path,
-         prediction_path=args.prediction_path,
-         mapping_path=args.mapping_path,
-         sample_duration=args.sample_duration,
-         min_overlap=args.min_overlap,
-         recording_duration=args.recording_duration,
-         columns_annotations=args.columns_annotations,
-         columns_predictions=args.columns_predictions,
-         selected_classes=args.selected_classes,
-         selected_recordings=args.selected_recordings,
-         metrics_list=args.metrics,
-         threshold=args.threshold,
-         class_wise=args.class_wise,
-     )
-
-     # Display the computed metrics
-     print(metrics_df)
-
-     # Create output directory if needed
-     if args.output_dir and not os.path.exists(args.output_dir):
-         os.makedirs(args.output_dir)
-
-     # Generate plots if specified
-     if args.plot_metrics:
-         pa.plot_metrics(predictions, labels, per_class_metrics=args.class_wise)
-         if args.output_dir:
-             import matplotlib.pyplot as plt
-
-             plt.savefig(os.path.join(args.output_dir, "metrics_plot.png"))
-         else:
-             plt.show()
-
-     if args.plot_confusion_matrix:
-         pa.plot_confusion_matrix(predictions, labels)
-         if args.output_dir:
-             import matplotlib.pyplot as plt
-
-             plt.savefig(os.path.join(args.output_dir, "confusion_matrix.png"))
-         else:
-             plt.show()
-
-     if args.plot_metrics_all_thresholds:
-         pa.plot_metrics_all_thresholds(predictions, labels, per_class_metrics=args.class_wise)
-         if args.output_dir:
-             import matplotlib.pyplot as plt
-
-             plt.savefig(os.path.join(args.output_dir, "metrics_all_thresholds.png"))
-         else:
-             plt.show()
+ """
+ Core script for assessing performance of prediction models against annotated data.
+
+ This script uses the `DataProcessor` and `PerformanceAssessor` classes to process prediction and
+ annotation data, compute metrics, and optionally generate plots. It supports flexible configurations
+ for columns, class mappings, and filtering based on selected classes or recordings.
+ """
+
+ import argparse
+ import json
+ import os
+ from collections.abc import Sequence
+
+ from birdnet_analyzer.evaluation.assessment.performance_assessor import (
+     PerformanceAssessor,
+ )
+ from birdnet_analyzer.evaluation.preprocessing.data_processor import DataProcessor
+
+
+ def process_data(
+     annotation_path: str,
+     prediction_path: str,
+     mapping_path: str | None = None,
+     sample_duration: float = 3.0,
+     min_overlap: float = 0.5,
+     recording_duration: float | None = None,
+     columns_annotations: dict[str, str] | None = None,
+     columns_predictions: dict[str, str] | None = None,
+     selected_classes: Sequence[str] | None = None,
+     selected_recordings: list[str] | None = None,
+     metrics_list: tuple[str, ...] = ("accuracy", "precision", "recall"),
+     threshold: float = 0.1,
+     class_wise: bool = False,
+ ):
+     """
+     Processes data, computes metrics, and prepares the performance assessment pipeline.
+
+     Args:
+         annotation_path (str): Path to the annotation file or folder.
+         prediction_path (str): Path to the prediction file or folder.
+         mapping_path (Optional[str]): Path to the class mapping JSON file, if applicable.
+         sample_duration (float): Duration of each sample interval in seconds.
+         min_overlap (float): Minimum overlap required between predictions and annotations.
+         recording_duration (Optional[float]): Total duration of the recordings, if known.
+         columns_annotations (Optional[Dict[str, str]]): Custom column mappings for annotations.
+         columns_predictions (Optional[Dict[str, str]]): Custom column mappings for predictions.
+         selected_classes (Optional[List[str]]): List of classes to include in the analysis.
+         selected_recordings (Optional[List[str]]): List of recordings to include in the analysis.
+         metrics_list (Tuple[str, ...]): Metrics to compute for performance assessment.
+         threshold (float): Confidence threshold for predictions.
+         class_wise (bool): Whether to calculate metrics on a per-class basis.
+
+     Returns:
+         Tuple: Metrics DataFrame, `PerformanceAssessor` object, predictions tensor, labels tensor.
+     """
+     # Load class mapping if provided
+     if mapping_path:
+         with open(mapping_path) as f:
+             class_mapping = json.load(f)
+     else:
+         class_mapping = None
+
+     # Determine directory and file paths for annotations and predictions
+     annotation_dir, annotation_file = (
+         (os.path.dirname(annotation_path), os.path.basename(annotation_path)) if os.path.isfile(annotation_path) else (annotation_path, None)
+     )
+     prediction_dir, prediction_file = (
+         (os.path.dirname(prediction_path), os.path.basename(prediction_path)) if os.path.isfile(prediction_path) else (prediction_path, None)
+     )
+
+     # Initialize the DataProcessor to handle and prepare data
+     processor = DataProcessor(
+         prediction_directory_path=prediction_dir,
+         prediction_file_name=prediction_file,
+         annotation_directory_path=annotation_dir,
+         annotation_file_name=annotation_file,
+         class_mapping=class_mapping,
+         sample_duration=sample_duration,
+         min_overlap=min_overlap,
+         columns_predictions=columns_predictions,
+         columns_annotations=columns_annotations,
+         recording_duration=recording_duration,
+     )
+
+     # Get the available classes and recordings
+     available_classes = processor.classes
+     available_recordings = processor.samples_df["filename"].unique().tolist()
+
+     # Default to all classes or recordings if none are specified
+     if selected_classes is None:
+         selected_classes = available_classes
+     if selected_recordings is None:
+         selected_recordings = available_recordings
+
+     # Retrieve predictions and labels tensors for the selected classes and recordings
+     predictions, labels, classes = processor.get_filtered_tensors(selected_classes, selected_recordings)
+
+     num_classes = len(classes)
+     task = "binary" if num_classes == 1 else "multilabel"
+
+     # Initialize the PerformanceAssessor for computing metrics
+     pa = PerformanceAssessor(
+         num_classes=num_classes,
+         threshold=threshold,
+         classes=classes,
+         task=task,
+         metrics_list=metrics_list,
+     )
+
+     # Compute performance metrics
+     metrics_df = pa.calculate_metrics(predictions, labels, per_class_metrics=class_wise)
+
+     return metrics_df, pa, predictions, labels
+
+
+ def main():
+     """
+     Entry point for the script. Parses command-line arguments and orchestrates the performance assessment pipeline.
+     """
+     import matplotlib.pyplot as plt
+
+     # Set up argument parsing
+     parser = argparse.ArgumentParser(description="Performance Assessor Core Script")
+     parser.add_argument("--annotation_path", required=True, help="Path to annotation file or folder")
+     parser.add_argument("--prediction_path", required=True, help="Path to prediction file or folder")
+     parser.add_argument("--mapping_path", help="Path to class mapping JSON file (optional)")
+     parser.add_argument("--sample_duration", type=float, default=3.0, help="Sample duration in seconds")
+     parser.add_argument("--min_overlap", type=float, default=0.5, help="Minimum overlap in seconds")
+     parser.add_argument("--recording_duration", type=float, help="Recording duration in seconds")
+     parser.add_argument("--columns_annotations", type=json.loads, help="JSON string for columns_annotations")
+     parser.add_argument("--columns_predictions", type=json.loads, help="JSON string for columns_predictions")
+     parser.add_argument("--selected_classes", nargs="+", help="List of selected classes")
+     parser.add_argument("--selected_recordings", nargs="+", help="List of selected recordings")
+     parser.add_argument("--metrics", nargs="+", default=["accuracy", "precision", "recall"], help="List of metrics")
+     parser.add_argument("--threshold", type=float, default=0.1, help="Threshold value (0-1)")
+     parser.add_argument("--class_wise", action="store_true", help="Calculate class-wise metrics")
+     parser.add_argument("--plot_metrics", action="store_true", help="Plot metrics")
+     parser.add_argument("--plot_confusion_matrix", action="store_true", help="Plot confusion matrix")
+     parser.add_argument("--plot_metrics_all_thresholds", action="store_true", help="Plot metrics for all thresholds")
+     parser.add_argument("--output_dir", help="Directory to save plots")
+
+     # Parse arguments
+     args = parser.parse_args()
+
+     # Process data and compute metrics
+     metrics_df, pa, predictions, labels = process_data(
+         annotation_path=args.annotation_path,
+         prediction_path=args.prediction_path,
+         mapping_path=args.mapping_path,
+         sample_duration=args.sample_duration,
+         min_overlap=args.min_overlap,
+         recording_duration=args.recording_duration,
+         columns_annotations=args.columns_annotations,
+         columns_predictions=args.columns_predictions,
+         selected_classes=args.selected_classes,
+         selected_recordings=args.selected_recordings,
+         metrics_list=args.metrics,
+         threshold=args.threshold,
+         class_wise=args.class_wise,
+     )
+
+     # Display the computed metrics
+     print(metrics_df)
+
+     # Create output directory if needed
+     if args.output_dir and not os.path.exists(args.output_dir):
+         os.makedirs(args.output_dir)
+
+     # Generate plots if specified
+     if args.plot_metrics:
+         pa.plot_metrics(predictions, labels, per_class_metrics=args.class_wise)
+         if args.output_dir:
+             plt.savefig(os.path.join(args.output_dir, "metrics_plot.png"))
+         else:
+             plt.show()
+
+     if args.plot_confusion_matrix:
+         pa.plot_confusion_matrix(predictions, labels)
+         if args.output_dir:
+             plt.savefig(os.path.join(args.output_dir, "confusion_matrix.png"))
+         else:
+             plt.show()
+
+     if args.plot_metrics_all_thresholds:
+         pa.plot_metrics_all_thresholds(predictions, labels, per_class_metrics=args.class_wise)
+         if args.output_dir:
+             plt.savefig(os.path.join(args.output_dir, "metrics_all_thresholds.png"))
+         else:
+             plt.show()
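
For reference, the reworked module can be driven directly from Python rather than through the CLI. A minimal usage sketch, assuming hypothetical `annotations/` and `predictions/` folders containing annotation and prediction tables; every keyword argument comes from the `process_data` signature shown above:

```python
# Usage sketch for process_data as defined in birdnet_analyzer/evaluation/__init__.py.
# Folder paths are hypothetical placeholders.
from birdnet_analyzer.evaluation import process_data

metrics_df, pa, predictions, labels = process_data(
    annotation_path="annotations/",    # hypothetical folder of annotation files
    prediction_path="predictions/",    # hypothetical folder of prediction files
    sample_duration=3.0,               # seconds per sample interval (the default)
    min_overlap=0.5,                   # minimum prediction/annotation overlap
    metrics_list=("accuracy", "precision", "recall"),
    threshold=0.25,                    # confidence threshold for predictions
    class_wise=True,                   # report metrics per class
)
print(metrics_df)
```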
birdnet_analyzer/evaluation/__main__.py
@@ -1,3 +1,3 @@
- from birdnet_analyzer.evaluation import main
-
- main()
+ from birdnet_analyzer.evaluation import main
+
+ main()
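
Since `__main__.py` only imports and calls `main()`, the same pipeline can also be exercised in-process by populating `sys.argv` before invoking it, which is equivalent to running `python -m birdnet_analyzer.evaluation`. A sketch, again with hypothetical paths:

```python
# Drives the argparse-based main() from birdnet_analyzer/evaluation/__init__.py.
# Paths are hypothetical placeholders; flags match the parser defined above.
import sys

from birdnet_analyzer.evaluation import main

sys.argv = [
    "birdnet_analyzer.evaluation",
    "--annotation_path", "annotations/",
    "--prediction_path", "predictions/",
    "--threshold", "0.25",
    "--class_wise",
]
main()
```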