dataset-toolkit 0.1.1__tar.gz → 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dataset_toolkit-0.1.1/dataset_toolkit.egg-info → dataset_toolkit-0.2.0}/PKG-INFO +39 -1
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/README.md +38 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit/__init__.py +16 -2
- dataset_toolkit-0.2.0/dataset_toolkit/exporters/yolo_exporter.py +157 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit/loaders/local_loader.py +145 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit/models.py +3 -1
- dataset_toolkit-0.2.0/dataset_toolkit/processors/__init__.py +9 -0
- dataset_toolkit-0.2.0/dataset_toolkit/processors/evaluator.py +535 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0/dataset_toolkit.egg-info}/PKG-INFO +39 -1
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit.egg-info/SOURCES.txt +3 -0
- dataset_toolkit-0.2.0/examples/evaluation_example.py +250 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/pyproject.toml +1 -1
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/setup.py +1 -1
- dataset_toolkit-0.1.1/dataset_toolkit/utils/__init__.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/LICENSE +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/MANIFEST.in +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit/exporters/__init__.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit/exporters/coco_exporter.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit/exporters/txt_exporter.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit/loaders/__init__.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit/pipeline.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit/processors/merger.py +0 -0
- {dataset_toolkit-0.1.1/dataset_toolkit/processors → dataset_toolkit-0.2.0/dataset_toolkit/utils}/__init__.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit/utils/coords.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit.egg-info/dependency_links.txt +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit.egg-info/requires.txt +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/dataset_toolkit.egg-info/top_level.txt +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/examples/basic_usage.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/requirements.txt +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/setup.cfg +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/tests/__init__.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/tests/conftest.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/tests/test_exporters.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/tests/test_loaders.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/tests/test_processors.py +0 -0
- {dataset_toolkit-0.1.1 → dataset_toolkit-0.2.0}/tests/test_pypi_test.py +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: dataset-toolkit
|
3
|
-
Version: 0.
|
3
|
+
Version: 0.2.0
|
4
4
|
Summary: 一个用于加载、处理和导出计算机视觉数据集的工具包
|
5
5
|
Home-page: https://github.com/yourusername/dataset-toolkit
|
6
6
|
Author: wenxiang.han
|
@@ -42,6 +42,7 @@ Dynamic: requires-python
|
|
42
42
|
- 📤 **灵活导出**:导出为 COCO JSON、TXT 等多种格式
|
43
43
|
- 🛠️ **工具函数**:提供坐标转换等实用工具
|
44
44
|
- 📦 **标准化数据模型**:统一的内部数据表示,方便扩展
|
45
|
+
- 📊 **模型评估**:完整的目标检测模型评估系统(v0.2.0+)
|
45
46
|
|
46
47
|
## 📦 安装
|
47
48
|
|
@@ -121,6 +122,43 @@ result = (pipeline
|
|
121
122
|
.execute())
|
122
123
|
```
|
123
124
|
|
125
|
+
### 模型评估(v0.2.0+)
|
126
|
+
|
127
|
+
```python
|
128
|
+
from dataset_toolkit import (
|
129
|
+
load_yolo_from_local,
|
130
|
+
load_predictions_from_streamlined,
|
131
|
+
Evaluator
|
132
|
+
)
|
133
|
+
|
134
|
+
# 1. 加载GT和预测结果
|
135
|
+
gt_dataset = load_yolo_from_local("/data/test/labels", {0: 'parcel'})
|
136
|
+
pred_dataset = load_predictions_from_streamlined(
|
137
|
+
"/results/predictions",
|
138
|
+
categories={0: 'parcel'},
|
139
|
+
image_dir="/data/test/images"
|
140
|
+
)
|
141
|
+
|
142
|
+
# 2. 创建评估器
|
143
|
+
evaluator = Evaluator(
|
144
|
+
positive_gt=gt_dataset,
|
145
|
+
positive_pred=pred_dataset,
|
146
|
+
iou_threshold=0.5
|
147
|
+
)
|
148
|
+
|
149
|
+
# 3. 计算指标
|
150
|
+
metrics = evaluator.calculate_metrics(confidence_threshold=0.5)
|
151
|
+
print(f"Precision: {metrics['precision']:.4f}")
|
152
|
+
print(f"Recall: {metrics['recall']:.4f}")
|
153
|
+
print(f"F1-Score: {metrics['f1']:.4f}")
|
154
|
+
|
155
|
+
# 4. 寻找最优阈值
|
156
|
+
optimal = evaluator.find_optimal_threshold(metric='f1')
|
157
|
+
print(f"最优阈值: {optimal['optimal_threshold']}")
|
158
|
+
```
|
159
|
+
|
160
|
+
详细文档请参考 [EVALUATION_GUIDE.md](EVALUATION_GUIDE.md)
|
161
|
+
|
124
162
|
## 📚 API 文档
|
125
163
|
|
126
164
|
### 数据加载器
|
@@ -9,6 +9,7 @@
|
|
9
9
|
- 📤 **灵活导出**:导出为 COCO JSON、TXT 等多种格式
|
10
10
|
- 🛠️ **工具函数**:提供坐标转换等实用工具
|
11
11
|
- 📦 **标准化数据模型**:统一的内部数据表示,方便扩展
|
12
|
+
- 📊 **模型评估**:完整的目标检测模型评估系统(v0.2.0+)
|
12
13
|
|
13
14
|
## 📦 安装
|
14
15
|
|
@@ -88,6 +89,43 @@ result = (pipeline
|
|
88
89
|
.execute())
|
89
90
|
```
|
90
91
|
|
92
|
+
### 模型评估(v0.2.0+)
|
93
|
+
|
94
|
+
```python
|
95
|
+
from dataset_toolkit import (
|
96
|
+
load_yolo_from_local,
|
97
|
+
load_predictions_from_streamlined,
|
98
|
+
Evaluator
|
99
|
+
)
|
100
|
+
|
101
|
+
# 1. 加载GT和预测结果
|
102
|
+
gt_dataset = load_yolo_from_local("/data/test/labels", {0: 'parcel'})
|
103
|
+
pred_dataset = load_predictions_from_streamlined(
|
104
|
+
"/results/predictions",
|
105
|
+
categories={0: 'parcel'},
|
106
|
+
image_dir="/data/test/images"
|
107
|
+
)
|
108
|
+
|
109
|
+
# 2. 创建评估器
|
110
|
+
evaluator = Evaluator(
|
111
|
+
positive_gt=gt_dataset,
|
112
|
+
positive_pred=pred_dataset,
|
113
|
+
iou_threshold=0.5
|
114
|
+
)
|
115
|
+
|
116
|
+
# 3. 计算指标
|
117
|
+
metrics = evaluator.calculate_metrics(confidence_threshold=0.5)
|
118
|
+
print(f"Precision: {metrics['precision']:.4f}")
|
119
|
+
print(f"Recall: {metrics['recall']:.4f}")
|
120
|
+
print(f"F1-Score: {metrics['f1']:.4f}")
|
121
|
+
|
122
|
+
# 4. 寻找最优阈值
|
123
|
+
optimal = evaluator.find_optimal_threshold(metric='f1')
|
124
|
+
print(f"最优阈值: {optimal['optimal_threshold']}")
|
125
|
+
```
|
126
|
+
|
127
|
+
详细文档请参考 [EVALUATION_GUIDE.md](EVALUATION_GUIDE.md)
|
128
|
+
|
91
129
|
## 📚 API 文档
|
92
130
|
|
93
131
|
### 数据加载器
|
@@ -15,7 +15,7 @@ Dataset Toolkit - 计算机视觉数据集处理工具包
|
|
15
15
|
>>> export_to_coco(dataset, "output.json")
|
16
16
|
"""
|
17
17
|
|
18
|
-
__version__ = "0.
|
18
|
+
__version__ = "0.2.0"
|
19
19
|
__author__ = "wenxiang.han"
|
20
20
|
__email__ = "wenxiang.han@anker-in.com"
|
21
21
|
|
@@ -28,13 +28,18 @@ from dataset_toolkit.models import (
|
|
28
28
|
|
29
29
|
from dataset_toolkit.loaders.local_loader import (
|
30
30
|
load_yolo_from_local,
|
31
|
-
load_csv_result_from_local
|
31
|
+
load_csv_result_from_local,
|
32
|
+
load_predictions_from_streamlined
|
32
33
|
)
|
33
34
|
|
34
35
|
from dataset_toolkit.processors.merger import (
|
35
36
|
merge_datasets
|
36
37
|
)
|
37
38
|
|
39
|
+
from dataset_toolkit.processors.evaluator import (
|
40
|
+
Evaluator
|
41
|
+
)
|
42
|
+
|
38
43
|
from dataset_toolkit.exporters.coco_exporter import (
|
39
44
|
export_to_coco
|
40
45
|
)
|
@@ -43,6 +48,11 @@ from dataset_toolkit.exporters.txt_exporter import (
|
|
43
48
|
export_to_txt
|
44
49
|
)
|
45
50
|
|
51
|
+
from dataset_toolkit.exporters.yolo_exporter import (
|
52
|
+
export_to_yolo_format,
|
53
|
+
export_to_yolo_and_txt
|
54
|
+
)
|
55
|
+
|
46
56
|
from dataset_toolkit.utils.coords import (
|
47
57
|
yolo_to_absolute_bbox
|
48
58
|
)
|
@@ -64,13 +74,17 @@ __all__ = [
|
|
64
74
|
# 加载器
|
65
75
|
"load_yolo_from_local",
|
66
76
|
"load_csv_result_from_local",
|
77
|
+
"load_predictions_from_streamlined",
|
67
78
|
|
68
79
|
# 处理器
|
69
80
|
"merge_datasets",
|
81
|
+
"Evaluator",
|
70
82
|
|
71
83
|
# 导出器
|
72
84
|
"export_to_coco",
|
73
85
|
"export_to_txt",
|
86
|
+
"export_to_yolo_format",
|
87
|
+
"export_to_yolo_and_txt",
|
74
88
|
|
75
89
|
# 工具函数
|
76
90
|
"yolo_to_absolute_bbox",
|
@@ -0,0 +1,157 @@
|
|
1
|
+
# dataset_toolkit/exporters/yolo_exporter.py
|
2
|
+
"""
|
3
|
+
导出为 YOLO 格式(完整的 images/ + labels/ 目录结构)
|
4
|
+
"""
|
5
|
+
import os
|
6
|
+
from pathlib import Path
|
7
|
+
from typing import Optional
|
8
|
+
|
9
|
+
|
10
|
+
def export_to_yolo_format(
    dataset,
    output_dir: str,
    use_symlinks: bool = True,
    overwrite: bool = False
):
    """Export a dataset as a standard YOLO directory layout.

    Parameters:
        dataset: Dataset object; ``dataset.images`` is iterated, each image
            providing ``path``, ``width``, ``height`` and ``annotations``.
        output_dir: destination directory.
        use_symlinks: link images with symlinks (True) or copy them (False).
        overwrite: whether to replace image files that already exist.

    Output layout:
        output_dir/
        ├── images/
        │   ├── img1.jpg
        │   └── img2.jpg
        └── labels/
            ├── img1.txt
            └── img2.txt

    Returns:
        pathlib.Path: the output directory path.
    """
    import shutil  # hoisted: previously imported inside the per-image loop

    output_path = Path(output_dir)
    images_dir = output_path / 'images'
    labels_dir = output_path / 'labels'

    # Create the target layout up front.
    images_dir.mkdir(parents=True, exist_ok=True)
    labels_dir.mkdir(parents=True, exist_ok=True)

    print(f"导出 YOLO 格式到: {output_path}")
    print(f"  使用软链接: {use_symlinks}")

    success_count = 0
    error_count = 0

    for img in dataset.images:
        try:
            img_path = Path(img.path)
            img_name = img_path.name
            stem = img_path.stem

            # 1. Materialize the image (symlink or copy). An existing file is
            #    kept unless overwrite is requested.
            target_img_path = images_dir / img_name
            if overwrite or not target_img_path.exists():
                if use_symlinks:
                    if target_img_path.exists():
                        target_img_path.unlink()  # drop stale link/file first
                    target_img_path.symlink_to(img_path.resolve())
                else:
                    shutil.copy2(img_path, target_img_path)

            # 2. Write the label file (always regenerated).
            #    Internal bbox: [x_min, y_min, width, height], absolute pixels.
            #    YOLO line:     class_id x_center y_center width height, normalized.
            label_path = labels_dir / f"{stem}.txt"
            with open(label_path, 'w') as f:
                for ann in img.annotations:
                    x_min, y_min, width, height = ann.bbox

                    x_center = (x_min + width / 2) / img.width
                    y_center = (y_min + height / 2) / img.height
                    norm_width = width / img.width
                    norm_height = height / img.height

                    f.write(f"{ann.category_id} {x_center:.6f} {y_center:.6f} {norm_width:.6f} {norm_height:.6f}\n")

            success_count += 1

        except Exception as e:
            # Best-effort export: report the failing image and keep going.
            print(f"警告: 处理图片失败 {img.path}: {e}")
            error_count += 1
            continue

    print(f"✓ 导出完成:")
    print(f"  成功: {success_count} 张图片")
    if error_count > 0:
        print(f"  失败: {error_count} 张图片")
    print(f"  图片目录: {images_dir}")
    print(f"  标注目录: {labels_dir}")

    return output_path
106
|
+
|
107
|
+
|
108
|
+
def export_to_yolo_and_txt(
    dataset,
    yolo_dir: str,
    txt_file: str,
    use_symlinks: bool = True,
    use_relative_paths: bool = False
):
    """Export *dataset* to a YOLO layout and write a txt list of its images.

    Parameters:
        dataset: Dataset object.
        yolo_dir: output directory for the YOLO layout.
        txt_file: path of the txt list file to generate.
        use_symlinks: link images instead of copying them.
        use_relative_paths: write paths relative to the txt file's directory
            instead of normalized absolute paths.

    Returns:
        pathlib.Path: the YOLO directory path.
    """
    # Step 1: produce the images/ + labels/ layout.
    exported_root = export_to_yolo_format(dataset, yolo_dir, use_symlinks=use_symlinks)

    # Step 2: build the list file, pointing at images/ inside the YOLO dir
    # (entries may be symlinks).
    image_root = exported_root / 'images'
    list_path = Path(txt_file)
    list_path.parent.mkdir(parents=True, exist_ok=True)

    print(f"\n生成 txt 列表: {txt_file}")

    entries = []
    for image in dataset.images:
        linked_image = image_root / Path(image.path).name
        if use_relative_paths:
            # Path relative to where the txt file lives.
            line = os.path.relpath(linked_image, list_path.parent)
        else:
            # Absolute path, normalized (removes "..") without resolving symlinks.
            line = os.path.normpath(str(linked_image.absolute()))
        entries.append(f"{line}\n")

    with open(txt_file, 'w') as handle:
        handle.writelines(entries)

    print(f"✓ txt 列表已生成: {len(dataset.images)} 行")

    return exported_root
|
+
|
@@ -186,4 +186,149 @@ def load_csv_result_from_local(dataset_path: str, categories: Dict[int, str] = N
|
|
186
186
|
|
187
187
|
print(f"加载完成. 共找到 {image_count} 张图片, {len(dataset.categories)} 个类别.")
|
188
188
|
print(f"类别映射: {dataset.categories}")
|
189
|
+
return dataset
|
190
|
+
|
191
|
+
|
192
|
+
def load_predictions_from_streamlined(
    predictions_dir: str,
    categories: Dict[int, str],
    image_dir: str = None
) -> Dataset:
    """Load a prediction dataset from a streamlined inference-output directory.

    Each prediction .txt file holds one detection per line:
        class_id,confidence,center_x,center_y,width,height
    e.g. ``0,0.934679,354.00,388.00,274.00,102.00``
    (center coordinates and sizes appear to be absolute pixels — the values
    are converted to [x_min, y_min, width, height] without normalization).

    Parameters:
        predictions_dir: directory containing the prediction .txt files.
        categories: category mapping {class_id: class_name}.
        image_dir: optional image directory used to read image sizes;
            when omitted, sibling ``images`` directories are probed.

    Returns:
        Dataset: prediction dataset object with dataset_type='pred'.

    Raises:
        FileNotFoundError: if *predictions_dir* does not exist.
    """
    pred_path = Path(predictions_dir)

    if not pred_path.is_dir():
        raise FileNotFoundError(f"预测结果目录不存在: {pred_path}")

    # Try to auto-discover an image directory near the predictions.
    if image_dir is None:
        # Common candidate locations, closest first.
        possible_image_dirs = [
            pred_path.parent / 'images',
            pred_path.parent.parent / 'images',
        ]
        for possible_dir in possible_image_dirs:
            if possible_dir.is_dir():
                image_dir = str(possible_dir)
                print(f"自动找到图像目录: {image_dir}")
                break

    dataset = Dataset(
        name=pred_path.name,
        categories=categories,
        dataset_type="pred"
    )

    supported_extensions = ['.jpg', '.jpeg', '.png']
    txt_files = list(pred_path.glob('*.txt'))

    print(f"开始加载预测结果: {pred_path.name}...")
    print(f"找到 {len(txt_files)} 个预测文件")

    loaded_count = 0
    skipped_count = 0

    for txt_file in txt_files:
        # The image file is assumed to share the prediction file's base name.
        image_base_name = txt_file.stem

        # Look up the corresponding image file (to get real dimensions).
        image_path = None
        img_width, img_height = None, None

        if image_dir:
            image_dir_path = Path(image_dir)
            for ext in supported_extensions:
                potential_image = image_dir_path / (image_base_name + ext)
                if potential_image.exists():
                    image_path = str(potential_image.resolve())
                    try:
                        with Image.open(potential_image) as img:
                            img_width, img_height = img.size
                    except IOError:
                        # Size stays None if the image is unreadable.
                        print(f"警告: 无法打开图片 {potential_image}")
                    break

        # No image found: fall back to a placeholder path and default size.
        if image_path is None:
            # Assumed default path and dimensions.
            image_path = f"unknown/{image_base_name}.jpg"
            img_width, img_height = 640, 640  # default size
            if image_dir:
                # Only counted as skipped when an image dir was available.
                skipped_count += 1

        # Build the per-image annotation container.
        # NOTE(review): image_id is always suffixed ".jpg" even when the real
        # image is .jpeg/.png — confirm this matches the GT dataset's image_ids.
        image_annotation = ImageAnnotation(
            image_id=image_base_name + '.jpg',
            path=image_path,
            width=img_width,
            height=img_height
        )

        # Parse the prediction file.
        try:
            with open(txt_file, 'r') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue

                    # Expected: class_id,confidence,center_x,center_y,width,height
                    parts = line.split(',')
                    if len(parts) != 6:
                        print(f"警告: 格式错误,已跳过: {txt_file} -> '{line}'")
                        continue

                    try:
                        class_id = int(parts[0])
                        confidence = float(parts[1])
                        center_x = float(parts[2])
                        center_y = float(parts[3])
                        width = float(parts[4])
                        height = float(parts[5])

                        # Convert center format to [x_min, y_min, width, height].
                        x_min = center_x - width / 2
                        y_min = center_y - height / 2

                        annotation = Annotation(
                            category_id=class_id,
                            bbox=[x_min, y_min, width, height],
                            confidence=confidence
                        )
                        image_annotation.annotations.append(annotation)

                    except (ValueError, IndexError) as e:
                        print(f"警告: 解析错误,已跳过: {txt_file} -> '{line}' ({e})")
                        continue

        except Exception as e:
            # Unreadable file: skip it entirely (its image is not added).
            print(f"警告: 读取文件失败,已跳过: {txt_file} ({e})")
            continue

        dataset.images.append(image_annotation)
        loaded_count += 1

    print(f"加载完成. 成功加载 {loaded_count} 个预测文件")
    if skipped_count > 0:
        print(f"警告: {skipped_count} 个文件未找到对应图像,使用默认尺寸")

    total_detections = sum(len(img.annotations) for img in dataset.images)
    print(f"总检测数: {total_detections}")

    return dataset
@@ -24,4 +24,6 @@ class Dataset:
|
|
24
24
|
"""代表一个完整的数据集对象,作为系统内部的标准化表示."""
|
25
25
|
name: str
|
26
26
|
images: List[ImageAnnotation] = field(default_factory=list)
|
27
|
-
categories: Dict[int, str] = field(default_factory=dict)
|
27
|
+
categories: Dict[int, str] = field(default_factory=dict)
|
28
|
+
dataset_type: str = "train" # 'train', 'gt', 'pred'
|
29
|
+
metadata: Dict = field(default_factory=dict) # 存储描述性信息,不包含处理参数
|