birdnet-analyzer 2.0.0 (birdnet_analyzer-2.0.0-py3-none-any.whl)

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. birdnet_analyzer/__init__.py +8 -0
  2. birdnet_analyzer/analyze/__init__.py +5 -0
  3. birdnet_analyzer/analyze/__main__.py +4 -0
  4. birdnet_analyzer/analyze/cli.py +25 -0
  5. birdnet_analyzer/analyze/core.py +245 -0
  6. birdnet_analyzer/analyze/utils.py +701 -0
  7. birdnet_analyzer/audio.py +372 -0
  8. birdnet_analyzer/cli.py +707 -0
  9. birdnet_analyzer/config.py +242 -0
  10. birdnet_analyzer/eBird_taxonomy_codes_2021E.json +25280 -0
  11. birdnet_analyzer/embeddings/__init__.py +4 -0
  12. birdnet_analyzer/embeddings/__main__.py +3 -0
  13. birdnet_analyzer/embeddings/cli.py +13 -0
  14. birdnet_analyzer/embeddings/core.py +70 -0
  15. birdnet_analyzer/embeddings/utils.py +193 -0
  16. birdnet_analyzer/evaluation/__init__.py +195 -0
  17. birdnet_analyzer/evaluation/__main__.py +3 -0
  18. birdnet_analyzer/gui/__init__.py +23 -0
  19. birdnet_analyzer/gui/__main__.py +3 -0
  20. birdnet_analyzer/gui/analysis.py +174 -0
  21. birdnet_analyzer/gui/assets/arrow_down.svg +4 -0
  22. birdnet_analyzer/gui/assets/arrow_left.svg +4 -0
  23. birdnet_analyzer/gui/assets/arrow_right.svg +4 -0
  24. birdnet_analyzer/gui/assets/arrow_up.svg +4 -0
  25. birdnet_analyzer/gui/assets/gui.css +29 -0
  26. birdnet_analyzer/gui/assets/gui.js +94 -0
  27. birdnet_analyzer/gui/assets/img/birdnet-icon.ico +0 -0
  28. birdnet_analyzer/gui/assets/img/birdnet_logo.png +0 -0
  29. birdnet_analyzer/gui/assets/img/birdnet_logo_no_transparent.png +0 -0
  30. birdnet_analyzer/gui/assets/img/clo-logo-bird.svg +1 -0
  31. birdnet_analyzer/gui/embeddings.py +620 -0
  32. birdnet_analyzer/gui/evaluation.py +813 -0
  33. birdnet_analyzer/gui/localization.py +68 -0
  34. birdnet_analyzer/gui/multi_file.py +246 -0
  35. birdnet_analyzer/gui/review.py +527 -0
  36. birdnet_analyzer/gui/segments.py +191 -0
  37. birdnet_analyzer/gui/settings.py +129 -0
  38. birdnet_analyzer/gui/single_file.py +269 -0
  39. birdnet_analyzer/gui/species.py +95 -0
  40. birdnet_analyzer/gui/train.py +698 -0
  41. birdnet_analyzer/gui/utils.py +808 -0
  42. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_af.txt +6522 -0
  43. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ar.txt +6522 -0
  44. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_bg.txt +6522 -0
  45. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ca.txt +6522 -0
  46. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_cs.txt +6522 -0
  47. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_da.txt +6522 -0
  48. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_de.txt +6522 -0
  49. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_el.txt +6522 -0
  50. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_en_uk.txt +6522 -0
  51. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_es.txt +6522 -0
  52. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_fi.txt +6522 -0
  53. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_fr.txt +6522 -0
  54. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_he.txt +6522 -0
  55. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_hr.txt +6522 -0
  56. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_hu.txt +6522 -0
  57. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_in.txt +6522 -0
  58. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_is.txt +6522 -0
  59. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_it.txt +6522 -0
  60. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ja.txt +6522 -0
  61. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ko.txt +6522 -0
  62. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_lt.txt +6522 -0
  63. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ml.txt +6522 -0
  64. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_nl.txt +6522 -0
  65. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_no.txt +6522 -0
  66. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_pl.txt +6522 -0
  67. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_pt_BR.txt +6522 -0
  68. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_pt_PT.txt +6522 -0
  69. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ro.txt +6522 -0
  70. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_ru.txt +6522 -0
  71. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sk.txt +6522 -0
  72. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sl.txt +6522 -0
  73. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sr.txt +6522 -0
  74. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_sv.txt +6522 -0
  75. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_th.txt +6522 -0
  76. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_tr.txt +6522 -0
  77. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_uk.txt +6522 -0
  78. birdnet_analyzer/labels/V2.4/BirdNET_GLOBAL_6K_V2.4_Labels_zh.txt +6522 -0
  79. birdnet_analyzer/lang/de.json +335 -0
  80. birdnet_analyzer/lang/en.json +335 -0
  81. birdnet_analyzer/lang/fi.json +335 -0
  82. birdnet_analyzer/lang/fr.json +335 -0
  83. birdnet_analyzer/lang/id.json +335 -0
  84. birdnet_analyzer/lang/pt-br.json +335 -0
  85. birdnet_analyzer/lang/ru.json +335 -0
  86. birdnet_analyzer/lang/se.json +335 -0
  87. birdnet_analyzer/lang/tlh.json +335 -0
  88. birdnet_analyzer/lang/zh_TW.json +335 -0
  89. birdnet_analyzer/model.py +1243 -0
  90. birdnet_analyzer/search/__init__.py +3 -0
  91. birdnet_analyzer/search/__main__.py +3 -0
  92. birdnet_analyzer/search/cli.py +12 -0
  93. birdnet_analyzer/search/core.py +78 -0
  94. birdnet_analyzer/search/utils.py +111 -0
  95. birdnet_analyzer/segments/__init__.py +3 -0
  96. birdnet_analyzer/segments/__main__.py +3 -0
  97. birdnet_analyzer/segments/cli.py +14 -0
  98. birdnet_analyzer/segments/core.py +78 -0
  99. birdnet_analyzer/segments/utils.py +394 -0
  100. birdnet_analyzer/species/__init__.py +3 -0
  101. birdnet_analyzer/species/__main__.py +3 -0
  102. birdnet_analyzer/species/cli.py +14 -0
  103. birdnet_analyzer/species/core.py +35 -0
  104. birdnet_analyzer/species/utils.py +75 -0
  105. birdnet_analyzer/train/__init__.py +3 -0
  106. birdnet_analyzer/train/__main__.py +3 -0
  107. birdnet_analyzer/train/cli.py +14 -0
  108. birdnet_analyzer/train/core.py +113 -0
  109. birdnet_analyzer/train/utils.py +847 -0
  110. birdnet_analyzer/translate.py +104 -0
  111. birdnet_analyzer/utils.py +419 -0
  112. birdnet_analyzer-2.0.0.dist-info/METADATA +129 -0
  113. birdnet_analyzer-2.0.0.dist-info/RECORD +117 -0
  114. birdnet_analyzer-2.0.0.dist-info/WHEEL +5 -0
  115. birdnet_analyzer-2.0.0.dist-info/entry_points.txt +11 -0
  116. birdnet_analyzer-2.0.0.dist-info/licenses/LICENSE +19 -0
  117. birdnet_analyzer-2.0.0.dist-info/top_level.txt +1 -0
birdnet_analyzer/segments/utils.py
@@ -0,0 +1,394 @@
+"""Extract segments from audio files based on BirdNET detections.
+
+Can be used to save the segments of the audio files for each detection.
+"""
+
+import os
+
+import numpy as np
+
+import birdnet_analyzer.audio as audio
+import birdnet_analyzer.config as cfg
+import birdnet_analyzer.utils as utils
+
+# Set numpy random seed
+np.random.seed(cfg.RANDOM_SEED)
+SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
+
+
+def detect_rtype(line: str):
+    """Detects the type of result file.
+
+    Args:
+        line: First line of text.
+
+    Returns:
+        Either "table", "kaleidoscope", "csv" or "audacity".
+    """
+    if line.lower().startswith("selection"):
+        return "table"
+    # elif line.lower().startswith("filepath"):
+    #     return "r"
+    elif line.lower().startswith("indir"):
+        return "kaleidoscope"
+    elif line.lower().startswith("start (s)"):
+        return "csv"
+    else:
+        return "audacity"
+
+
+def get_header_mapping(line: str) -> dict:
+    """
+    Parses a header line and returns a mapping of column names to their indices.
+
+    Args:
+        line (str): A string representing the header line of a file.
+
+    Returns:
+        dict: A dictionary where the keys are column names and the values are their respective indices.
+    """
+    rtype = detect_rtype(line)
+
+    if rtype == "table" or rtype == "audacity":
+        sep = "\t"
+    else:
+        sep = ","
+
+    cols = line.split(sep)
+
+    mapping = {}
+
+    for i, col in enumerate(cols):
+        mapping[col] = i
+
+    return mapping
+
+
+def parse_folders(apath: str, rpath: str, allowed_result_filetypes: list[str] = ["txt", "csv"]) -> list[dict]:
+    """Read audio and result files.
+
+    Reads all audio files and BirdNET output inside directory recursively.
+
+    Args:
+        apath (str): Path to search for audio files.
+        rpath (str): Path to search for result files.
+        allowed_result_filetypes (list[str]): List of extensions for the result files.
+
+    Returns:
+        list[dict]: A list of {"audio": path_to_audio, "result": path_to_result }.
+    """
+    data = {}
+    apath = apath.replace("/", os.sep).replace("\\", os.sep)
+    rpath = rpath.replace("/", os.sep).replace("\\", os.sep)
+
+    # Check if combined selection table is present and read that.
+    if os.path.exists(os.path.join(rpath, cfg.OUTPUT_RAVEN_FILENAME)):
+        # Read combined Raven selection table
+        rfile = os.path.join(rpath, cfg.OUTPUT_RAVEN_FILENAME)
+        data["combined"] = {"isCombinedFile": True, "result": rfile}
+    elif os.path.exists(os.path.join(rpath, cfg.OUTPUT_CSV_FILENAME)):
+        rfile = os.path.join(rpath, cfg.OUTPUT_CSV_FILENAME)
+        data["combined"] = {"isCombinedFile": True, "result": rfile}
+    elif os.path.exists(os.path.join(rpath, cfg.OUTPUT_KALEIDOSCOPE_FILENAME)):
+        rfile = os.path.join(rpath, cfg.OUTPUT_KALEIDOSCOPE_FILENAME)
+        data["combined"] = {"isCombinedFile": True, "result": rfile}
+    else:
+        # Get all audio files
+        for root, _, files in os.walk(apath):
+            for f in files:
+                if f.rsplit(".", 1)[-1].lower() in cfg.ALLOWED_FILETYPES and not f.startswith("."):
+                    table_key = os.path.join(root.strip(apath), f.rsplit(".", 1)[0])
+                    data[table_key] = {"audio": os.path.join(root, f), "result": ""}
+
+        # Get all result files
+        for root, _, files in os.walk(rpath):
+            for f in files:
+                if f.rsplit(".", 1)[-1] in allowed_result_filetypes and ".BirdNET." in f:
+                    table_key = os.path.join(root.strip(rpath), f.split(".BirdNET.", 1)[0])
+                    if table_key in data:
+                        data[table_key]["result"] = os.path.join(root, f)
+
+    # Convert to list
+    flist = [f for f in data.values() if f["result"]]
+
+    print(f"Found {len(flist)} audio files with valid result file.")
+
+    return flist
+
+
+def parse_files(flist: list[dict], max_segments=100):
+    """
+    Parses a list of files to extract and organize bird call segments by species.
+
+    Args:
+        flist (list[dict]): A list of dictionaries, each containing 'audio' and 'result' file paths.
+            Optionally, a dictionary can have 'isCombinedFile' set to True to indicate
+            that it is a combined result file.
+        max_segments (int, optional): The maximum number of segments to retain per species. Defaults to 100.
+    Returns:
+        list[tuple]: A list of tuples where each tuple contains an audio file path and a list of segments
+            associated with that audio file.
+    Raises:
+        KeyError: If the dictionaries in flist do not contain the required keys ('audio' and 'result').
+    Example:
+        flist = [
+            {"audio": "path/to/audio1.wav", "result": "path/to/result1.csv"},
+            {"audio": "path/to/audio2.wav", "result": "path/to/result2.csv"}
+        ]
+        segments = parse_files(flist, max_segments=50)
+    """
+    species_segments: dict[str, list] = {}
+
+    is_combined_rfile = len(flist) == 1 and flist[0].get("isCombinedFile", False)
+
+    if is_combined_rfile:
+        rfile = flist[0]["result"]
+        segments = find_segments_from_combined(rfile)
+
+        # Parse segments by species
+        for s in segments:
+            if s["species"] not in species_segments:
+                species_segments[s["species"]] = []
+
+            species_segments[s["species"]].append(s)
+    else:
+        for f in flist:
+            # Paths
+            afile = f["audio"]
+            rfile = f["result"]
+
+            # Get all segments for result file
+            segments = find_segments(afile, rfile)
+
+            # Parse segments by species
+            for s in segments:
+                if s["species"] not in species_segments:
+                    species_segments[s["species"]] = []
+
+                species_segments[s["species"]].append(s)
+
+    # Shuffle segments for each species and limit to max_segments
+    for s in species_segments:
+        np.random.shuffle(species_segments[s])
+        species_segments[s] = species_segments[s][:max_segments]
+
+    # Make dict of segments per audio file
+    segments: dict[str, list] = {}
+    seg_cnt = 0
+
+    for s in species_segments:
+        for seg in species_segments[s]:
+            if seg["audio"] not in segments:
+                segments[seg["audio"]] = []
+
+            segments[seg["audio"]].append(seg)
+            seg_cnt += 1
+
+    print(f"Found {seg_cnt} segments in {len(segments)} audio files.")
+
+    # Convert to list
+    flist = [tuple(e) for e in segments.items()]
+
+    return flist
+
+
+def find_segments_from_combined(rfile: str) -> list[dict]:
+    """Extracts the segments from a combined results file.
+
+    Args:
+        rfile (str): Path to the result file.
+
+    Returns:
+        list[dict]: A list of dicts in the form of
+        {"audio": afile, "start": start, "end": end, "species": species, "confidence": confidence}
+    """
+    segments: list[dict] = []
+
+    # Open and parse result file
+    lines = utils.read_lines(rfile)
+
+    # Auto-detect result type
+    rtype = detect_rtype(lines[0])
+
+    if rtype == "audacity":
+        raise Exception("Audacity files are not supported for combined results.")
+
+    # Get mapping from the header column
+    header_mapping = get_header_mapping(lines[0])
+
+    # Get start and end times based on rtype
+    confidence = 0
+    start = end = 0.0
+    species = ""
+    afile = ""
+
+    for i, line in enumerate(lines):
+        if rtype == "table" and i > 0:
+            d = line.split("\t")
+            file_offset = float(d[header_mapping["File Offset (s)"]])
+            start = file_offset
+            end = file_offset + (float(d[header_mapping["End Time (s)"]]) - float(d[header_mapping["Begin Time (s)"]]))
+            species = d[header_mapping["Common Name"]]
+            confidence = float(d[header_mapping["Confidence"]])
+            afile = d[header_mapping["Begin Path"]].replace("/", os.sep).replace("\\", os.sep)
+
+        elif rtype == "kaleidoscope" and i > 0:
+            d = line.split(",")
+            start = float(d[header_mapping["OFFSET"]])
+            end = float(d[header_mapping["DURATION"]]) + start
+            species = d[header_mapping["scientific_name"]]
+            confidence = float(d[header_mapping["confidence"]])
+            in_dir = d[header_mapping["INDIR"]]
+            folder = d[header_mapping["FOLDER"]]
+            in_file = d[header_mapping["IN FILE"]]
+            afile = os.path.join(in_dir, folder, in_file).replace("/", os.sep).replace("\\", os.sep)
+
+        elif rtype == "csv" and i > 0:
+            d = line.split(",")
+            start = float(d[header_mapping["Start (s)"]])
+            end = float(d[header_mapping["End (s)"]])
+            species = d[header_mapping["Common name"]]
+            confidence = float(d[header_mapping["Confidence"]])
+            afile = d[header_mapping["File"]].replace("/", os.sep).replace("\\", os.sep)
+
+        # Check if confidence is high enough and label is not "nocall"
+        if confidence >= cfg.MIN_CONFIDENCE and species.lower() != "nocall" and afile:
+            segments.append({"audio": afile, "start": start, "end": end, "species": species, "confidence": confidence})
+
+    return segments
+
+
+def find_segments(afile: str, rfile: str):
+    """Extracts the segments for an audio file from the results file.
+
+    Args:
+        afile: Path to the audio file.
+        rfile: Path to the result file.
+
+    Returns:
+        A list of dicts in the form of
+        {"audio": afile, "start": start, "end": end, "species": species, "confidence": confidence}
+    """
+    segments: list[dict] = []
+
+    # Open and parse result file
+    lines = utils.read_lines(rfile)
+
+    # Auto-detect result type
+    rtype = detect_rtype(lines[0])
+
+    # Get mapping from the header column
+    header_mapping = get_header_mapping(lines[0])
+
+    # Get start and end times based on rtype
+    confidence = 0
+    start = end = 0.0
+    species = ""
+
+    for i, line in enumerate(lines):
+        if rtype == "table" and i > 0:
+            d = line.split("\t")
+            start = float(d[header_mapping["Begin Time (s)"]])
+            end = float(d[header_mapping["End Time (s)"]])
+            species = d[header_mapping["Common Name"]]
+            confidence = float(d[header_mapping["Confidence"]])
+
+        elif rtype == "audacity":
+            d = line.split("\t")
+            start = float(d[0])
+            end = float(d[1])
+            species = d[2].split(", ")[1]
+            confidence = float(d[-1])
+
+        elif rtype == "kaleidoscope" and i > 0:
+            d = line.split(",")
+            start = float(d[header_mapping["OFFSET"]])
+            end = float(d[header_mapping["DURATION"]]) + start
+            species = d[header_mapping["scientific_name"]]
+            confidence = float(d[header_mapping["confidence"]])
+
+        elif rtype == "csv" and i > 0:
+            d = line.split(",")
+            start = float(d[header_mapping["Start (s)"]])
+            end = float(d[header_mapping["End (s)"]])
+            species = d[header_mapping["Common name"]]
+            confidence = float(d[header_mapping["Confidence"]])
+
+        # Check if confidence is high enough and label is not "nocall"
+        if confidence >= cfg.MIN_CONFIDENCE and species.lower() != "nocall":
+            segments.append({"audio": afile, "start": start, "end": end, "species": species, "confidence": confidence})
+
+    return segments
+
+
+def extract_segments(item: tuple[tuple[str, list[dict]], float, dict[str]]):
+    """
+    Extracts audio segments from a given audio file based on provided segment information.
+    Args:
+        item (tuple): A tuple containing:
+            - A tuple with:
+                - A string representing the path to the audio file.
+                - A list of dictionaries, each containing segment information with keys "start", "end", "species", "confidence", and "audio".
+            - A float representing the segment length.
+            - A dictionary containing configuration settings.
+    Returns:
+        bool: True if segments were successfully extracted, False otherwise.
+    Raises:
+        Exception: If there is an error opening the audio file or extracting segments.
+    """
+    # Paths and config
+    afile = item[0][0]
+    segments = item[0][1]
+    seg_length = item[1]
+    cfg.set_config(item[2])
+
+    # Status
+    print(f"Extracting segments from {afile}")
+
+    try:
+        # Open audio file
+        sig, rate = audio.open_audio_file(afile, cfg.SAMPLE_RATE, speed=cfg.AUDIO_SPEED)
+    except Exception as ex:
+        print(f"Error: Cannot open audio file {afile}", flush=True)
+        utils.write_error_log(ex)
+
+        return
+
+    # Extract segments
+    for seg_cnt, seg in enumerate(segments, 1):
+        try:
+            # Get start and end times
+            start = int((seg["start"] * rate) / cfg.AUDIO_SPEED)
+            end = int((seg["end"] * rate) / cfg.AUDIO_SPEED)
+
+            offset = max(0, ((seg_length * rate) - (end - start)) // 2)
+            start = max(0, start - offset)
+            end = min(len(sig), end + offset)
+
+            # Make sure segment is long enough
+            if end > start:
+                # Get segment raw audio from signal
+                seg_sig = sig[int(start) : int(end)]
+
+                # Make output path
+                outpath = os.path.join(cfg.OUTPUT_PATH, seg["species"])
+                os.makedirs(outpath, exist_ok=True)
+
+                # Save segment
+                seg_name = "{:.3f}_{}_{}_{:.1f}s_{:.1f}s.wav".format(
+                    seg["confidence"],
+                    seg_cnt,
+                    seg["audio"].rsplit(os.sep, 1)[-1].rsplit(".", 1)[0],
+                    seg["start"],
+                    seg["end"],
+                )
+                seg_path = os.path.join(outpath, seg_name)
+                audio.save_signal(seg_sig, seg_path, rate)
+
+        except Exception as ex:
+            # Write error log
+            print(f"Error: Cannot extract segments from {afile}.", flush=True)
+            utils.write_error_log(ex)
+            return False
+
+    return True
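Read together, these helpers form a small pipeline: parse_folders() pairs audio files with their BirdNET result files, parse_files() groups detections by species and caps them at max_segments, and extract_segments() writes each detection window to a WAV file under a per-species folder. The following sketch wires them together; the paths, confidence cutoff, and 3.0 s segment length are illustrative, and it assumes the config module exposes a get_config() counterpart to the set_config() call used inside extract_segments().

# Hypothetical driver for the helpers above; not part of the packaged code.
import birdnet_analyzer.config as cfg
from birdnet_analyzer.segments.utils import extract_segments, parse_files, parse_folders

cfg.OUTPUT_PATH = "example_segments"  # assumed destination for extracted WAV files
cfg.MIN_CONFIDENCE = 0.5              # assumed confidence cutoff for detections

pairs = parse_folders("recordings", "results")   # [{"audio": ..., "result": ...}, ...]
per_file = parse_files(pairs, max_segments=20)   # [(audio_path, [segment dicts]), ...]

for audio_file, segs in per_file:
    # Second element is the target segment length in seconds,
    # third is a config snapshot (assumed cfg.get_config()) for worker processes.
    extract_segments(((audio_file, segs), 3.0, cfg.get_config()))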
birdnet_analyzer/species/__init__.py
@@ -0,0 +1,3 @@
+from birdnet_analyzer.species.core import species
+
+__all__ = ["species"]
birdnet_analyzer/species/__main__.py
@@ -0,0 +1,3 @@
+from birdnet_analyzer.species.cli import main
+
+main()
birdnet_analyzer/species/cli.py
@@ -0,0 +1,14 @@
+from birdnet_analyzer.utils import runtime_error_handler
+
+
+@runtime_error_handler
+def main():
+    import birdnet_analyzer.cli as cli
+    from birdnet_analyzer import species
+
+    # Parse arguments
+    parser = cli.species_parser()
+
+    args = parser.parse_args()
+
+    species(**vars(args))
birdnet_analyzer/species/core.py
@@ -0,0 +1,35 @@
+from typing import Literal
+
+
+def species(
+    output: str,
+    *,
+    lat: float = -1,
+    lon: float = -1,
+    week: int = -1,
+    sf_thresh: float = 0.03,
+    sortby: Literal["freq", "alpha"] = "freq",
+):
+    """
+    Retrieves and processes species data based on the provided parameters.
+    Args:
+        output (str): The output directory or file path where the results will be stored.
+        lat (float, optional): Latitude of the location for species filtering. Defaults to -1 (no filtering by location).
+        lon (float, optional): Longitude of the location for species filtering. Defaults to -1 (no filtering by location).
+        week (int, optional): Week of the year for species filtering. Defaults to -1 (no filtering by time).
+        sf_thresh (float, optional): Species frequency threshold for filtering. Defaults to 0.03.
+        sortby (Literal["freq", "alpha"], optional): Sorting method for the species list.
+            "freq" sorts by frequency, and "alpha" sorts alphabetically. Defaults to "freq".
+    Raises:
+        FileNotFoundError: If the required model files are not found.
+        ValueError: If invalid parameters are provided.
+    Notes:
+        This function ensures that the required model files exist before processing.
+        It delegates the main processing to the `run` function from `birdnet_analyzer.species.utils`.
+    """
+    from birdnet_analyzer.species.utils import run
+    from birdnet_analyzer.utils import ensure_model_exists
+
+    ensure_model_exists()
+
+    run(output, lat, lon, week, sf_thresh, sortby)
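species() is the programmatic counterpart of the CLI wrapper above: everything after output is keyword-only, and -1 values disable the corresponding filter. A hedged usage sketch follows; the output path and coordinates are made-up examples, not values from the package.

# Illustrative call only; paths and coordinates are placeholders.
from birdnet_analyzer.species import species

species(
    "example_species_list.txt",  # output file (or an existing directory)
    lat=42.45,                   # example coordinates
    lon=-76.47,
    week=20,                     # week-of-year index; -1 disables the time filter
    sf_thresh=0.03,              # location-filter threshold passed through to run()
    sortby="alpha",              # alphabetical instead of frequency order
)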
birdnet_analyzer/species/utils.py
@@ -0,0 +1,75 @@
+"""Module for predicting a species list.
+
+Can be used to predict a species list using coordinates and weeks.
+"""
+
+import os
+
+import birdnet_analyzer.config as cfg
+import birdnet_analyzer.model as model
+import birdnet_analyzer.utils as utils
+
+
+def get_species_list(lat: float, lon: float, week: int, threshold=0.05, sort=False) -> list[str]:
+    """Predict a species list.
+
+    Uses the model to predict the species list for the given coordinates and filters by threshold.
+
+    Args:
+        lat: The latitude.
+        lon: The longitude.
+        week: The week of the year [1-48]. Use -1 for year-round.
+        threshold: Only values above or equal to threshold will be shown.
+        sort: If the species list should be sorted.
+
+    Returns:
+        A list of all eligible species.
+    """
+    # Extract species from model
+    pred = model.explore(lat, lon, week)
+
+    # Make species list
+    slist = [p[1] for p in pred if p[0] >= threshold]
+
+    return sorted(slist) if sort else slist
+
+
+def run(output_path, lat, lon, week, threshold, sortby):
+    """
+    Generates a species list for a given location and time, and saves it to the specified output path.
+    Args:
+        output_path (str): The path where the species list will be saved. If it's a directory, the list will be saved as "species_list.txt" inside it.
+        lat (float): Latitude of the location.
+        lon (float): Longitude of the location.
+        week (int): Week of the year (1-48) for which the species list is generated.
+        threshold (float): Threshold for location filtering.
+        sortby (str): Sorting criteria for the species list. Can be "freq" for frequency or any other value for alphabetical sorting.
+    Returns:
+        None
+    """
+    # Load eBird codes, labels
+    cfg.LABELS = utils.read_lines(cfg.LABELS_FILE)
+
+    # Set output path
+    cfg.OUTPUT_PATH = output_path
+
+    if os.path.isdir(cfg.OUTPUT_PATH):
+        cfg.OUTPUT_PATH = os.path.join(cfg.OUTPUT_PATH, "species_list.txt")
+
+    # Set config
+    cfg.LATITUDE, cfg.LONGITUDE, cfg.WEEK = lat, lon, week
+    cfg.LOCATION_FILTER_THRESHOLD = threshold
+
+    print(f"Getting species list for {cfg.LATITUDE}/{cfg.LONGITUDE}, Week {cfg.WEEK}...", end="", flush=True)
+
+    # Get species list
+    species_list = get_species_list(
+        cfg.LATITUDE, cfg.LONGITUDE, cfg.WEEK, cfg.LOCATION_FILTER_THRESHOLD, False if sortby == "freq" else True
+    )
+
+    print(f"Done. {len(species_list)} species on list.", flush=True)
+
+    # Save species list
+    with open(cfg.OUTPUT_PATH, "w") as f:
+        for s in species_list:
+            f.write(s + "\n")
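Note how run() translates sortby into the boolean sort argument: get_species_list() only sorts alphabetically when sortby is not "freq", while frequency order is simply the order in which model.explore() returns its predictions. The threshold-and-sort step can be illustrated in isolation with a fabricated prediction list (the tuples below are invented for the example):

# Stand-alone illustration of the filtering logic in get_species_list(); data is fabricated.
pred = [
    (0.81, "Cardinalis cardinalis_Northern Cardinal"),
    (0.02, "Spinus tristis_American Goldfinch"),
    (0.44, "Cyanocitta cristata_Blue Jay"),
]

threshold = 0.05
slist = [p[1] for p in pred if p[0] >= threshold]  # drop entries below the location-filter threshold

print(slist)          # "freq": keep the model's own (frequency) order
print(sorted(slist))  # "alpha": sort alphabetically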
birdnet_analyzer/train/__init__.py
@@ -0,0 +1,3 @@
+from birdnet_analyzer.train.core import train
+
+__all__ = ["train"]
birdnet_analyzer/train/__main__.py
@@ -0,0 +1,3 @@
+from birdnet_analyzer.train.cli import main
+
+main()
birdnet_analyzer/train/cli.py
@@ -0,0 +1,14 @@
+from birdnet_analyzer.utils import runtime_error_handler
+
+
+@runtime_error_handler
+def main():
+    import birdnet_analyzer.cli as cli
+    from birdnet_analyzer import train
+
+    # Parse arguments
+    parser = cli.train_parser()
+
+    args = parser.parse_args()
+
+    train(**vars(args))
birdnet_analyzer/train/core.py
@@ -0,0 +1,113 @@
+from typing import Literal
+
+
+def train(
+    input: str,
+    output: str = "checkpoints/custom/Custom_Classifier",
+    test_data: str = None,
+    *,
+    crop_mode: Literal["center", "first", "segments"] = "center",
+    overlap: float = 0.0,
+    epochs: int = 50,
+    batch_size: int = 32,
+    val_split: float = 0.2,
+    learning_rate: float = 0.0001,
+    use_focal_loss: bool = False,
+    focal_loss_gamma: float = 2.0,
+    focal_loss_alpha: float = 0.25,
+    hidden_units: int = 0,
+    dropout: float = 0.0,
+    label_smoothing: bool = False,
+    mixup: bool = False,
+    upsampling_ratio: float = 0.0,
+    upsampling_mode: Literal["repeat", "mean", "smote"] = "repeat",
+    model_format: Literal["tflite", "raven", "both"] = "tflite",
+    model_save_mode: Literal["replace", "append"] = "replace",
+    cache_mode: Literal["load", "save"] | None = None,
+    cache_file: str = "train_cache.npz",
+    threads: int = 1,
+    fmin: float = 0.0,
+    fmax: float = 15000.0,
+    audio_speed: float = 1.0,
+    autotune: bool = False,
+    autotune_trials: int = 50,
+    autotune_executions_per_trial: int = 1,
+):
+    """
+    Trains a custom classifier model using the BirdNET-Analyzer framework.
+    Args:
+        input (str): Path to the training data directory.
+        test_data (str, optional): Path to the test data directory. Defaults to None. If not specified, a validation split will be used.
+        output (str, optional): Path to save the trained model. Defaults to "checkpoints/custom/Custom_Classifier".
+        crop_mode (Literal["center", "first", "segments"], optional): Mode for cropping audio samples. Defaults to "center".
+        overlap (float, optional): Overlap ratio for audio segments. Defaults to 0.0.
+        epochs (int, optional): Number of training epochs. Defaults to 50.
+        batch_size (int, optional): Batch size for training. Defaults to 32.
+        val_split (float, optional): Fraction of data to use for validation. Defaults to 0.2.
+        learning_rate (float, optional): Learning rate for the optimizer. Defaults to 0.0001.
+        use_focal_loss (bool, optional): Whether to use focal loss for training. Defaults to False.
+        focal_loss_gamma (float, optional): Gamma parameter for focal loss. Defaults to 2.0.
+        focal_loss_alpha (float, optional): Alpha parameter for focal loss. Defaults to 0.25.
+        hidden_units (int, optional): Number of hidden units in the model. Defaults to 0.
+        dropout (float, optional): Dropout rate for regularization. Defaults to 0.0.
+        label_smoothing (bool, optional): Whether to use label smoothing. Defaults to False.
+        mixup (bool, optional): Whether to use mixup data augmentation. Defaults to False.
+        upsampling_ratio (float, optional): Ratio for upsampling underrepresented classes. Defaults to 0.0.
+        upsampling_mode (Literal["repeat", "mean", "smote"], optional): Mode for upsampling. Defaults to "repeat".
+        model_format (Literal["tflite", "raven", "both"], optional): Format to save the trained model. Defaults to "tflite".
+        model_save_mode (Literal["replace", "append"], optional): Save mode for the model. Defaults to "replace".
+        cache_mode (Literal["load", "save"] | None, optional): Cache mode for training data. Defaults to None.
+        cache_file (str, optional): Path to the cache file. Defaults to "train_cache.npz".
+        threads (int, optional): Number of CPU threads to use. Defaults to 1.
+        fmin (float, optional): Minimum frequency for bandpass filtering. Defaults to 0.0.
+        fmax (float, optional): Maximum frequency for bandpass filtering. Defaults to 15000.0.
+        audio_speed (float, optional): Speed factor for audio playback. Defaults to 1.0.
+        autotune (bool, optional): Whether to use hyperparameter autotuning. Defaults to False.
+        autotune_trials (int, optional): Number of trials for autotuning. Defaults to 50.
+        autotune_executions_per_trial (int, optional): Number of executions per autotuning trial. Defaults to 1.
+    Returns:
+        None
+    """
+    from birdnet_analyzer.train.utils import train_model
+    import birdnet_analyzer.config as cfg
+    from birdnet_analyzer.utils import ensure_model_exists
+
+    ensure_model_exists()
+
+    # Config
+    cfg.TRAIN_DATA_PATH = input
+    cfg.TEST_DATA_PATH = test_data
+    cfg.SAMPLE_CROP_MODE = crop_mode
+    cfg.SIG_OVERLAP = overlap
+    cfg.CUSTOM_CLASSIFIER = output
+    cfg.TRAIN_EPOCHS = epochs
+    cfg.TRAIN_BATCH_SIZE = batch_size
+    cfg.TRAIN_VAL_SPLIT = val_split
+    cfg.TRAIN_LEARNING_RATE = learning_rate
+    cfg.TRAIN_WITH_FOCAL_LOSS = use_focal_loss if use_focal_loss is not None else cfg.TRAIN_WITH_FOCAL_LOSS
+    cfg.FOCAL_LOSS_GAMMA = focal_loss_gamma
+    cfg.FOCAL_LOSS_ALPHA = focal_loss_alpha
+    cfg.TRAIN_HIDDEN_UNITS = hidden_units
+    cfg.TRAIN_DROPOUT = dropout
+    cfg.TRAIN_WITH_LABEL_SMOOTHING = label_smoothing if label_smoothing is not None else cfg.TRAIN_WITH_LABEL_SMOOTHING
+    cfg.TRAIN_WITH_MIXUP = mixup if mixup is not None else cfg.TRAIN_WITH_MIXUP
+    cfg.UPSAMPLING_RATIO = upsampling_ratio
+    cfg.UPSAMPLING_MODE = upsampling_mode
+    cfg.TRAINED_MODEL_OUTPUT_FORMAT = model_format
+    cfg.TRAINED_MODEL_SAVE_MODE = model_save_mode
+    cfg.TRAIN_CACHE_MODE = cache_mode
+    cfg.TRAIN_CACHE_FILE = cache_file
+    cfg.TFLITE_THREADS = 1
+    cfg.CPU_THREADS = threads
+
+    cfg.BANDPASS_FMIN = fmin
+    cfg.BANDPASS_FMAX = fmax
+
+    cfg.AUDIO_SPEED = audio_speed
+
+    cfg.AUTOTUNE = autotune
+    cfg.AUTOTUNE_TRIALS = autotune_trials
+    cfg.AUTOTUNE_EXECUTIONS_PER_TRIAL = autotune_executions_per_trial
+
+    # Train model
+    train_model()
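train() itself is a thin facade: it copies each keyword argument into the global config module and then delegates to train_model() from birdnet_analyzer.train.utils. A hedged example call is sketched below; the dataset path, output name, and hyperparameters are placeholders, and the folder-per-class dataset layout is an assumption, not something stated in this file.

# Illustrative invocation only; paths and hyperparameters are placeholders.
from birdnet_analyzer.train import train

train(
    "datasets/my_birds",                       # assumed layout: one subfolder of audio clips per class
    output="checkpoints/custom/MyClassifier",  # where the trained classifier is written
    epochs=100,
    batch_size=64,
    hidden_units=512,   # number of hidden units in the custom classifier
    mixup=True,         # enable mixup augmentation
    threads=4,          # CPU threads (cfg.CPU_THREADS)
)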