autogluon.multimodal 1.1.2b20241106__py3-none-any.whl → 1.1.2b20241108__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autogluon/multimodal/configs/model/default.yaml +1 -0
- autogluon/multimodal/configs/pretrain/detection/dino/dino-4scale_r50_8xb2-12e_coco.py +1 -1
- autogluon/multimodal/learners/object_detection.py +91 -36
- autogluon/multimodal/predictor.py +8 -2
- autogluon/multimodal/utils/__init__.py +1 -1
- autogluon/multimodal/utils/hpo.py +14 -5
- autogluon/multimodal/utils/object_detection.py +131 -47
- autogluon/multimodal/version.py +1 -1
- {autogluon.multimodal-1.1.2b20241106.dist-info → autogluon.multimodal-1.1.2b20241108.dist-info}/METADATA +8 -8
- {autogluon.multimodal-1.1.2b20241106.dist-info → autogluon.multimodal-1.1.2b20241108.dist-info}/RECORD +17 -17
- /autogluon.multimodal-1.1.2b20241106-py3.8-nspkg.pth → /autogluon.multimodal-1.1.2b20241108-py3.8-nspkg.pth +0 -0
- {autogluon.multimodal-1.1.2b20241106.dist-info → autogluon.multimodal-1.1.2b20241108.dist-info}/LICENSE +0 -0
- {autogluon.multimodal-1.1.2b20241106.dist-info → autogluon.multimodal-1.1.2b20241108.dist-info}/NOTICE +0 -0
- {autogluon.multimodal-1.1.2b20241106.dist-info → autogluon.multimodal-1.1.2b20241108.dist-info}/WHEEL +0 -0
- {autogluon.multimodal-1.1.2b20241106.dist-info → autogluon.multimodal-1.1.2b20241108.dist-info}/namespace_packages.txt +0 -0
- {autogluon.multimodal-1.1.2b20241106.dist-info → autogluon.multimodal-1.1.2b20241108.dist-info}/top_level.txt +0 -0
- {autogluon.multimodal-1.1.2b20241106.dist-info → autogluon.multimodal-1.1.2b20241108.dist-info}/zip-safe +0 -0
autogluon/multimodal/configs/model/default.yaml
CHANGED

@@ -115,6 +115,7 @@ model:
     max_img_num_per_col: 1
     output_bbox_format: "xyxy" # now support xyxy or xywh, for bbox format details see https://keras.io/api/keras_cv/bounding_box/formats/
     frozen_layers: null
+    coco_root: null

   mmocr_text_detection:
     checkpoint_name: "TextSnake"
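The new `coco_root` key defaults to null and, like any other config field, can be overridden through `hyperparameters`; the log message added in utils/object_detection.py below points at exactly this override. A minimal sketch, assuming a COCO-format dataset whose images do not live next to the annotation file (all paths are hypothetical):

    from autogluon.multimodal import MultiModalPredictor

    predictor = MultiModalPredictor(
        problem_type="object_detection",
        sample_data_path="annotations/train.json",  # hypothetical COCO annotation file
        hyperparameters={"model.mmdet_image.coco_root": "/data/my_coco"},  # hypothetical image root
    )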
autogluon/multimodal/configs/pretrain/detection/dino/dino-4scale_r50_8xb2-12e_coco.py
CHANGED

@@ -84,7 +84,7 @@ model = dict(
 # train_pipeline, NOTE the img_scale and the Pad's size_divisor is different
 # from the default setting in mmdet.
 train_pipeline = [
-    dict(type="LoadImageFromFile", backend_args={{_base_.backend_args}}),
+    dict(type="LoadImageFromFile", backend_args={{_base_.backend_args}}),  # nosec
     dict(type="LoadAnnotations", with_bbox=True),
     dict(type="RandomFlip", prob=0.5),
     dict(
autogluon/multimodal/learners/object_detection.py
CHANGED

@@ -15,12 +15,13 @@ from ..utils import (
     check_if_packages_installed,
     cocoeval,
     convert_pred_to_xywh,
+    convert_result_df,
     create_fusion_model,
     extract_from_output,
     from_coco_or_voc,
     get_detection_classes,
     object_detection_data_to_df,
-    save_result_df,
+    save_result_coco_format,
     setup_save_path,
     split_train_tuning_data,
 )
@@ -39,8 +40,9 @@ class ObjectDetectionLearner(BaseLearner):
         hyperparameters: Optional[dict] = None,
         path: Optional[str] = None,
         verbosity: Optional[int] = 2,
-        num_classes: Optional[int] = None,
-        classes: Optional[list] = None,
+        num_classes: Optional[int] = None,
+        classes: Optional[list] = None,
+        category_ids: Optional[list] = None,
         warn_if_exist: Optional[bool] = True,
         enable_progress_bar: Optional[bool] = None,
         pretrained: Optional[bool] = True,
@@ -74,14 +76,17 @@ class ObjectDetectionLearner(BaseLearner):
         )
         check_if_packages_installed(problem_type=self._problem_type)

+        self._config = self.get_config_per_run(config=self._config, hyperparameters=hyperparameters)
+
         self._output_shape = num_classes
         self._classes = classes
+        self._category_ids = category_ids
         self._sample_data_path = sample_data_path

         # TODO: merge object detection and open vocabulary object detection
         self._label_column = "label"
         if self._sample_data_path is not None:
-            self._classes = get_detection_classes(self._sample_data_path)
+            self._classes, self._category_ids = get_detection_classes(self._sample_data_path)
             self._output_shape = len(self._classes)

         # TODO: merge _detection_anno_train and detection_anno_train?
@@ -97,15 +102,32 @@ class ObjectDetectionLearner(BaseLearner):
         """
         Return the classes of object detection.
         """
-        return self._model.model.CLASSES
+        if self._model.model.CLASSES is not None and self._classes is not None:
+            assert self._classes == self._model.model.CLASSES, f"{self._classes}\n{self._model.model.CLASSES}"
+        return self._classes if self._classes is not None else self._model.model.CLASSES
+
+    @property
+    def category_ids(self):
+        """
+        Return the category ids of object detection.
+        """
+        return self._category_ids

     def setup_detection_train_tuning_data(self, max_num_tuning_data, seed, train_data, tuning_data):
         if isinstance(train_data, str):
             self._detection_anno_train = train_data
-            train_data = from_coco_or_voc(train_data, "train")  # TODO: Refactor to use convert_data_to_df
+            train_data = from_coco_or_voc(
+                train_data,
+                "train",
+                coco_root=self._config.model.mmdet_image.coco_root,
+            )  # TODO: Refactor to use convert_data_to_df
             if tuning_data is not None:
                 self.detection_anno_train = tuning_data
-                tuning_data = from_coco_or_voc(tuning_data, "val")  # TODO: Refactor to use convert_data_to_df
+                tuning_data = from_coco_or_voc(
+                    tuning_data,
+                    "val",
+                    coco_root=self._config.model.mmdet_image.coco_root,
+                )  # TODO: Refactor to use convert_data_to_df
                 if max_num_tuning_data is not None:
                     if len(tuning_data) > max_num_tuning_data:
                         tuning_data = tuning_data.sample(
@@ -114,10 +136,16 @@ class ObjectDetectionLearner(BaseLearner):
         elif isinstance(train_data, pd.DataFrame):
             self._detection_anno_train = None
             # sanity check dataframe columns
-            train_data = object_detection_data_to_df(train_data)
+            train_data = object_detection_data_to_df(
+                train_data,
+                coco_root=self._config.model.mmdet_image.coco_root,
+            )
             if tuning_data is not None:
                 self.detection_anno_train = tuning_data
-                tuning_data = object_detection_data_to_df(tuning_data)
+                tuning_data = object_detection_data_to_df(
+                    tuning_data,
+                    coco_root=self._config.model.mmdet_image.coco_root,
+                )
                 if max_num_tuning_data is not None:
                     if len(tuning_data) > max_num_tuning_data:
                         tuning_data = tuning_data.sample(
@@ -556,7 +584,9 @@ class ObjectDetectionLearner(BaseLearner):
         if isinstance(anno_file_or_df, str):
             anno_file = anno_file_or_df
             data = from_coco_or_voc(
-                anno_file,
+                anno_file,
+                "test",
+                coco_root=self._config.model.mmdet_image.coco_root,
             )  # TODO: maybe remove default splits hardcoding (only used in VOC)
             if os.path.isdir(anno_file):
                 eval_tool = "torchmetrics"  # we can only use torchmetrics for VOC format evaluation.
@@ -636,7 +666,10 @@ class ObjectDetectionLearner(BaseLearner):
                 eval_tool=eval_tool,
             )
         else:
-            data = object_detection_data_to_df(data)
+            data = object_detection_data_to_df(
+                data,
+                coco_root=self._config.model.mmdet_image.coco_root,
+            )
             return self.evaluate_coco(
                 anno_file_or_df=data,
                 metrics=metrics,
@@ -648,6 +681,7 @@ class ObjectDetectionLearner(BaseLearner):
         self,
         data: Union[pd.DataFrame, dict, list, str],
         as_pandas: Optional[bool] = None,
+        as_coco: Optional[bool] = True,
         realtime: Optional[bool] = False,
         save_results: Optional[bool] = None,
         **kwargs,
@@ -662,6 +696,8 @@ class ObjectDetectionLearner(BaseLearner):
             follow same format (except for the `label` column).
         as_pandas
             Whether to return the output as a pandas DataFrame(Series) (True) or numpy array (False).
+        as_coco
+            Whether to save the output as a COCO json file (True) or pandas DataFrame (False).
         realtime
             Whether to do realtime inference, which is efficient for small data (default False).
             If provided None, we would infer it on based on the data modalities
@@ -674,46 +710,65 @@ class ObjectDetectionLearner(BaseLearner):
         Array of predictions, one corresponding to each row in given dataset.
         """
         self.ensure_predict_ready()
+        if as_pandas is None and isinstance(data, pd.DataFrame):
+            as_pandas = True
+
         ret_type = BBOX
+
+        # only supports coco/voc format for OBJECT_DETECTION
         if self._problem_type == OBJECT_DETECTION:
-            data = object_detection_data_to_df(data)
-            if self._label_column not in data:
+            data_path = data
+            data_df = object_detection_data_to_df(
+                data_path,
+                coco_root=self._config.model.mmdet_image.coco_root,
+            )
+            if self._label_column not in data_df:
                 self._label_column = None

         outputs = self.predict_per_run(
-            data=data,
+            data=data_df,
             realtime=realtime,
             requires_label=False,
         )
         pred = extract_from_output(outputs=outputs, ret_type=ret_type)
-        if self._problem_type == OBJECT_DETECTION:
-            if self._model.output_bbox_format == XYWH:
-                pred = convert_pred_to_xywh(pred)

+        self._save_path = setup_save_path(
+            old_save_path=self._save_path,
+            warn_if_exist=False,
+        )
+        result_path = os.path.join(self._save_path, "result.txt")
+
+        pred_df = convert_result_df(
+            pred=convert_pred_to_xywh(pred) if self._model.output_bbox_format == XYWH else pred,
+            data=data_df,
+            detection_classes=self.classes,
+            result_path=result_path,
+        )
+
-        if save_results and self._problem_type == OBJECT_DETECTION:
+        if save_results:
             self._save_path = setup_save_path(
                 old_save_path=self._save_path,
                 warn_if_exist=False,
             )
-            result_path = os.path.join(self._save_path, "result.txt")
-            save_result_df(
-                pred=pred,
-                data=data,
-                detection_classes=self._model.model.CLASSES,
-                result_path=result_path,
-            )
-
-        if (as_pandas is None and isinstance(data, pd.DataFrame)) or as_pandas is True:
-            # TODO: ...
-            # TODO: ...
-            pred = save_result_df(
-                pred=pred,
-                data=data,
-                detection_classes=self._model.model.CLASSES,
-                result_path=None,
-            )
+            if as_coco:
+                result_path = os.path.join(self._save_path, "result.json")
+                save_result_coco_format(
+                    data_path=data_path,
+                    pred=pred,
+                    category_ids=self.category_ids,
+                    result_path=result_path,
+                    coco_root=self._config.model.mmdet_image.coco_root,
+                )
+            else:
+                pred_df.to_csv(result_path, index=False)
+            logger.info(f"Saved detection results {'as coco' if as_coco else 'as dataframe'} to {result_path}")

-        return pred
+        if as_pandas:
+            return pred_df
+        else:
+            if self._model.output_bbox_format == XYWH:
+                pred = convert_pred_to_xywh(pred)
+            return pred

     def predict_proba(
         self,
autogluon/multimodal/predictor.py
CHANGED

@@ -605,6 +605,7 @@ class MultiModalPredictor:
         as_pandas: Optional[bool] = None,
         realtime: Optional[bool] = False,
         save_results: Optional[bool] = None,
+        **kwargs,
     ):
         """
         Predict the label column values for new data.
@@ -617,8 +618,8 @@ class MultiModalPredictor:
         candidate_data
             The candidate data from which to search the query data's matches.
         id_mappings
-            ...
-            ...
+            Id-to-content mappings. The contents can be text, image, etc.
+            This is used when data contain the query/response identifiers instead of their contents.
         as_pandas
             Whether to return the output as a pandas DataFrame(Series) (True) or numpy array (False).
         realtime
@@ -627,10 +628,14 @@ class MultiModalPredictor:
             and sample number.
         save_results
             Whether to save the prediction results (only works for detection now)
+        **kwargs
+            Additional keyword arguments to pass to the underlying learner's predict method.
+            For example, `as_coco` for object detection tasks.

         Returns
         -------
         Array of predictions, one corresponding to each row in given dataset.
+        Format depends on the specific learner and provided arguments.
         """
         return self._learner.predict(
             data=data,
@@ -639,6 +644,7 @@ class MultiModalPredictor:
             realtime=realtime,
             save_results=save_results,
             id_mappings=id_mappings,
+            **kwargs,
         )

     def predict_proba(
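Since `predict` now forwards extra keyword arguments to the learner, detection results can be saved either as COCO JSON or as a CSV dataframe from the public API. A minimal sketch, assuming a trained detector (the annotation file name is hypothetical):

    # as_coco=True (the default) writes result.json in COCO format;
    # as_coco=False writes the result dataframe to result.txt as CSV.
    pred_df = predictor.predict("test.json", save_results=True, as_coco=True)
    pred_df = predictor.predict("test.json", save_results=True, as_coco=False)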
autogluon/multimodal/utils/__init__.py
CHANGED

@@ -82,6 +82,7 @@ from .object_detection import (
     bbox_xyxy_to_xywh,
     cocoeval,
     convert_pred_to_xywh,
+    convert_result_df,
     from_coco,
     from_coco_or_voc,
     from_dict,
@@ -90,7 +91,6 @@ from .object_detection import (
     object_detection_data_to_df,
     object_detection_df_to_coco,
     save_result_coco_format,
-    save_result_df,
     save_result_voc_format,
     visualize_detection,
 )
autogluon/multimodal/utils/hpo.py
CHANGED

@@ -50,9 +50,9 @@ def hpo_trial(sampled_hyperparameters, learner, checkpoint_dir=None, **_fit_args):
     resources = context.get_trial_resources().required_resources
     num_cpus = int(resources.get("CPU"))

-    ...
-    ...
-    ...
+    # The original hyperparameters is the search space, replace it with the hyperparameters sampled
+    _fit_args["hyperparameters"] = sampled_hyperparameters
+
     _fit_args["save_path"] = context.get_trial_dir()  # We want to save each trial to a separate directory
     logger.debug(f"hpo trial save_path: {_fit_args['save_path']}")
     if checkpoint_dir is not None:
@@ -62,7 +62,15 @@ def hpo_trial(sampled_hyperparameters, learner, checkpoint_dir=None, **_fit_args):
     learner.fit_per_run(**_fit_args)


-def build_final_learner(learner, best_trial_path, save_path, last_ckpt_path, is_matching, standalone, clean_ckpts):
+def build_final_learner(
+    learner,
+    best_trial_path,
+    save_path,
+    last_ckpt_path,
+    is_matching,
+    standalone,
+    clean_ckpts,
+):
     """
     Build the final learner after HPO is finished.
@@ -167,12 +175,13 @@ def hyperparameter_tune(hyperparameter_tune_kwargs, resources, is_matching=False, **_fit_args):
     mode = _fit_args.get("learner")._minmax_mode
     save_path = _fit_args.get("save_path")
     time_budget_s = _fit_args.get("max_time")
+    num_to_keep = hyperparameter_tune_kwargs.pop("num_to_keep", 3)
     if time_budget_s is not None:
         time_budget_s *= 0.95  # give some buffer time to ray
     try:
         run_config_kwargs = {
             "checkpoint_config": CheckpointConfig(
-                num_to_keep=3,
+                num_to_keep=num_to_keep,
                 checkpoint_score_attribute=metric,
             ),
         }
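The number of Ray Tune checkpoints kept per trial is now configurable through `hyperparameter_tune_kwargs` instead of being hardcoded to 3. A minimal sketch of passing it from the predictor API (the search space and dataframe are hypothetical):

    from ray import tune

    predictor.fit(
        train_data=train_df,  # hypothetical training dataframe
        hyperparameters={"optimization.learning_rate": tune.loguniform(1e-5, 1e-3)},
        hyperparameter_tune_kwargs={
            "searcher": "bayes",
            "scheduler": "ASHA",
            "num_trials": 8,
            "num_to_keep": 5,  # popped by hyperparameter_tune(); defaults to 3
        },
    )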
autogluon/multimodal/utils/object_detection.py
CHANGED

@@ -9,6 +9,7 @@ import defusedxml.ElementTree as ET
 import numpy as np
 import pandas as pd
 import PIL
+import torch
 from torchmetrics.detection.mean_ap import MeanAveragePrecision

 from ..constants import (
@@ -58,24 +59,43 @@ def _get_image_info(image_path: str):

 def get_df_unique_classes(data: pd.DataFrame):
     """
-    Get the unique classes
+    Get the unique classes and their category IDs from the dataframe for object detection.
+
     Parameters
     ----------
-    data
-        ...
+    data : pd.DataFrame
+        DataFrame holding the data for object detection. Each row should contain a 'rois'
+        column with detection boxes and class labels.
+
     Returns
     -------
-        ...
+    tuple
+        A tuple containing (class_names, category_ids) where:
+        - class_names: list of unique class name strings
+        - category_ids: dict mapping class names to their numeric IDs
     """
     unique_classes = {}
+
+    # Iterate through all rows in the dataframe
     for idx in range(data.shape[0]):
         row = data.iloc[idx]
         rois = row["rois"]
+
+        # Process each ROI in the current row
         for roi in rois:
-            ...
+            # Unpack ROI values (assuming last element is class label)
+            *_, class_label = roi
+
+            # Add new classes to the dictionary with auto-incrementing IDs
             if class_label not in unique_classes:
-                ...
-    ...
+                # Start IDs from 1, as 0 is often reserved for background
+                unique_classes[class_label] = len(unique_classes) + 1
+
+    # Create the output lists/dicts
+    class_names = list(unique_classes.keys())
+    category_ids = unique_classes
+
+    return class_names, category_ids


 def object_detection_df_to_coco(data: pd.DataFrame, save_path: Optional[str] = None):
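A small worked example of what the updated helper returns, assuming a toy dataframe in the expected `rois` layout (image paths and boxes are made up):

    import pandas as pd

    df = pd.DataFrame(
        {
            "image": ["a.jpg", "b.jpg"],  # hypothetical image paths
            "rois": [
                [[10, 10, 50, 50, "cat"]],  # each roi: [x1, y1, x2, y2, class_label]
                [[5, 5, 40, 60, "dog"], [0, 0, 8, 8, "cat"]],
            ],
        }
    )

    class_names, category_ids = get_df_unique_classes(df)
    # class_names  == ["cat", "dog"]
    # category_ids == {"cat": 1, "dog": 2}  # IDs start at 1; 0 is left for background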
@@ -145,7 +165,9 @@ def object_detection_df_to_coco(data: pd.DataFrame, save_path: Optional[str] = None):
     return output_json_dict


-def object_detection_data_to_df(data: Union[pd.DataFrame, dict, list, str]) -> pd.DataFrame:
+def object_detection_data_to_df(
+    data: Union[pd.DataFrame, dict, list, str], coco_root: Optional[str] = None
+) -> pd.DataFrame:
     """
     Construct a dataframe from a data dictionary, json file path (for COCO), folder path (for VOC),
     image path (for single image), list of image paths (for multiple images)
@@ -163,7 +185,7 @@ def object_detection_data_to_df(data: Union[pd.DataFrame, dict, list, str]) -> pd.DataFrame:
         return from_list(data)
     if isinstance(data, str):
         if os.path.isdir(data) or data.endswith(".json"):
-            return from_coco_or_voc(data)
+            return from_coco_or_voc(data, coco_root=coco_root)
         return from_str(data)
     if isinstance(data, pd.DataFrame):
         sanity_check_dataframe(data)
@@ -303,7 +325,7 @@ def from_voc(
     rpath = Path(root).expanduser()
     img_list = []

-    class_names = get_detection_classes(root)
+    class_names, _ = get_detection_classes(root)

     NAME_TO_IDX = dict(zip(class_names, range(len(class_names))))
     name_to_index = lambda name: NAME_TO_IDX[name]
@@ -700,7 +722,7 @@ def _check_load_coco_bbox(

 def from_coco(
     anno_file: Optional[str],
-    root: Optional[str] = None,
+    coco_root: Optional[str] = None,
     min_object_area: Optional[Union[int, float]] = 0,
     use_crowd: Optional[bool] = False,
 ):
@@ -718,7 +740,7 @@ def from_coco(
     ----------
     anno_file
         The path to the annotation file.
-    root
+    coco_root
         Root of the COCO folder. The default relative root folder (if set to `None`) is `anno_file/../`.
     min_object_area
         Minimum object area to consider.
@@ -740,16 +762,19 @@ def from_coco(
     coco = COCO(anno_file)

     # get data root
-    if isinstance(root, Path):
-        root = str(root.expanduser().resolve())
-    elif isinstance(root, str):
-        root = os.path.abspath(os.path.expanduser(root))
-    elif root is None:
+    if isinstance(coco_root, Path):
+        coco_root = str(coco_root.expanduser().resolve())
+    elif isinstance(coco_root, str):
+        coco_root = os.path.abspath(os.path.expanduser(coco_root))
+    elif coco_root is None:
         # try to use the default coco structure
-        root = os.path.join(os.path.dirname(anno_file), "..")
-        logger.info(f"Using default root folder: {root}.")
+        coco_root = os.path.join(os.path.dirname(anno_file), "..")
+        logger.info(
+            f"Using default root folder: {coco_root}. "
+            "Specify `model.mmdet_image.coco_root=...` in hyperparameters if you think it is wrong."
+        )
     else:
-        raise ValueError("Unable to parse root: {}".format(root))
+        raise ValueError("Unable to parse coco_root: {}".format(coco_root))

     # support prediction using data with no annotations
     # note that data with annotation can be used for prediction without any changes
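To make the default resolution concrete: with `coco_root=None` the root is taken to be the parent of the annotation file's directory, which matches the standard COCO layout. A quick sketch (paths hypothetical):

    import os

    anno_file = "/data/coco2017/annotations/instances_val2017.json"
    coco_root = os.path.join(os.path.dirname(anno_file), "..")
    print(os.path.abspath(coco_root))  # /data/coco2017, where val2017/ etc. are expected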
@@ -764,9 +789,9 @@ def from_coco(
     for entry in coco.loadImgs(image_ids):
         if "coco_url" in entry:
             dirname, filename = entry["coco_url"].split("/")[-2:]
-            abs_path = os.path.join(root, dirname, filename)
+            abs_path = os.path.join(coco_root, dirname, filename)
         else:
-            abs_path = os.path.join(root, entry["file_name"])
+            abs_path = os.path.join(coco_root, entry["file_name"])
         if not os.path.exists(abs_path):
             logger.warning(f"File skipped since not exists: {abs_path}.")
             continue
@@ -809,7 +834,7 @@ def get_image_filename(path: str):
 class COCODataset:
     # The class that load/save COCO data format.
     # TODO: refactor data loading into here
-    def __init__(self, anno_file: str):
+    def __init__(self, anno_file: str, category_ids: Optional[List] = None):
         """
         Parameters
         ----------
@@ -828,7 +853,13 @@ class COCODataset:
             img_id_list.append(int(img["id"]))
         self.image_filename_to_id = dict(zip(img_filename_list, img_id_list))

-        ...
+        if category_ids is None:
+            if "categories" in d:
+                self.category_ids = [cat["id"] for cat in d["categories"]]
+            else:
+                self.category_ids = range(9999)  # TODO: remove hardcoding here
+        else:
+            self.category_ids = category_ids

     def get_image_id_from_path(self, image_path: str):
         """
@@ -1146,7 +1177,7 @@ def process_voc_annotations(
         f.writelines("\n".join(xml_file_names))


-def from_coco_or_voc(file_path: str, splits: Optional[Union[str]] = None):
+def from_coco_or_voc(file_path: str, splits: Optional[Union[str]] = None, coco_root: Optional[str] = None):
     """
     Convert the data from coco or voc format to pandas Dataframe.
@@ -1167,54 +1198,68 @@ def from_coco_or_voc(file_path: str, splits: Optional[Union[str]] = None):
         # VOC use dir as input
         return from_voc(root=file_path, splits=splits)
     else:
-        return from_coco(file_path)
+        return from_coco(file_path, coco_root=coco_root)


 def get_coco_format_classes(sample_data_path: str):
     """
-    ...
+    Get class names and category IDs for COCO format data.

     Parameters
     ----------
-    sample_data_path
+    sample_data_path : str
         The path to COCO format json annotation file. Could be any split, e.g. train/val/test/....

     Returns
     -------
-        ...
+    tuple
+        A tuple containing (class_names, category_ids) where:
+        - class_names: list of class name strings
+        - category_ids: dict mapping class names to their COCO category IDs
     """
     try:
         with open(sample_data_path, "r") as f:
             annotation = json.load(f)
     except:
         raise ValueError(f"Failed to load json from provided json file: {sample_data_path}.")
-    return [cat["name"] for cat in annotation["categories"]]
+
+    # Extract both names and IDs from categories
+    class_names = [cat["name"] for cat in annotation["categories"]]
+
+    # Create mapping of names to their original COCO category IDs
+    category_ids = [cat["id"] for cat in annotation["categories"]]
+
+    return class_names, category_ids


 def get_voc_format_classes(root: str):
     """
-    ...
+    Get class names and category IDs for VOC format data.

     Parameters
     ----------
-    root
+    root : str
         The path to the root directory of VOC data.

     Returns
     -------
-        ...
+    tuple
+        A tuple containing (class_names, category_ids) where:
+        - class_names: list of class name strings
+        - category_ids: dict mapping class names to their numeric IDs
     """
     if is_url(root):
         root = download(root)
-    rpath = Path(root).expanduser()

+    rpath = Path(root).expanduser()
     labels_file = os.path.join(rpath, "labels.txt")
+
     if os.path.exists(labels_file):
         with open(labels_file) as f:
             class_names = [line.rstrip().lower() for line in f]
         print(f"using class_names in labels.txt: {class_names}")
     else:
-        ...
+        # read the class names and save results
         logger.warning(
             "labels.txt does not exist, using default VOC names. "
             "Creating labels.txt by scanning the directory: {}".format(os.path.join(root, "Annotations"))
@@ -1223,28 +1268,46 @@ def get_voc_format_classes(root: str):
             voc_annotation_path=os.path.join(root, "Annotations"), voc_class_names_output_path=labels_file
         )

-    return class_names
+    # There are no category IDs in VOC format
+    # Create category IDs dictionary starting from 1
+    # 0 is typically reserved for background in many frameworks
+    category_ids = [idx + 1 for idx, name in enumerate(class_names)]
+
+    return class_names, category_ids


 def get_detection_classes(sample_data_path):
     """
-    ...
+    Get class names and category IDs from detection dataset in various formats.

     Parameters
     ----------
-    sample_data_path
-        ...
-        ...
+    sample_data_path : Union[str, pd.DataFrame]
+        The input can be one of:
+        - str (directory): Path to root directory of VOC format data
+        - str (file): Path to COCO format JSON annotation file
+        - pd.DataFrame: DataFrame containing detection data with 'rois' column

     Returns
     -------
-        ...
+    tuple
+        A tuple containing (class_names, category_ids) where:
+        - class_names: list of class name strings
+        - category_ids: dict mapping class names to their numeric IDs
+
+        For VOC: IDs start from 1
+        For COCO: Original category IDs from annotation file
+        For DataFrame: Sequential IDs starting from 1
     """
+    # Handle string paths for VOC and COCO formats
     if isinstance(sample_data_path, str):
         if os.path.isdir(sample_data_path):
+            # Directory path indicates VOC format
             return get_voc_format_classes(sample_data_path)
         else:
+            # File path indicates COCO format JSON
             return get_coco_format_classes(sample_data_path)
+    # Handle DataFrame format
     elif isinstance(sample_data_path, pd.DataFrame):
         return get_df_unique_classes(sample_data_path)
@@ -1492,7 +1555,7 @@ def get_color(idx):
     return color


-def save_result_df(
+def convert_result_df(
     pred: Iterable, data: Union[pd.DataFrame, Dict], detection_classes: List[str], result_path: Optional[str] = None
 ):
     """
@@ -1539,11 +1602,11 @@ def save_result_df(
     return result_df


-def save_result_coco_format(data_path, pred, result_path):
-    coco_dataset = COCODataset(data_path)
+def save_result_coco_format(data_path, pred, category_ids, result_path, coco_root: Optional[str] = None):
+    coco_dataset = COCODataset(data_path, category_ids=category_ids)
     result_name, _ = os.path.splitext(result_path)
     result_path = result_name + ".json"
-    coco_dataset.save_result(pred, from_coco_or_voc(data_path, "test"), save_path=result_path)
+    coco_dataset.save_result(pred, from_coco_or_voc(data_path, "test", coco_root=coco_root), save_path=result_path)
     logger.info(25, f"Saved detection result to {result_path}")
@@ -1554,9 +1617,30 @@ def save_result_voc_format(pred, result_path):
     logger.info(25, f"Saved detection result to {result_path}")


-def convert_pred_to_xywh(pred: Optional[List]):
+def convert_pred_to_xywh(pred: Optional[List]) -> Optional[List]:
+    """
+    Convert prediction bounding boxes from XYXY to XYWH format.
+
+    Args:
+        pred: List of predictions, where each prediction contains 'bboxes' that can be
+            either a torch.Tensor or numpy.ndarray
+
+    Returns:
+        Modified list of predictions with bboxes in XYWH format
+    """
     if not pred:
         return pred
+
     for i, pred_per_image in enumerate(pred):
-        pred[i]["bboxes"] = bbox_xyxy_to_xywh(pred_per_image["bboxes"].detach().numpy())
+        bboxes = pred_per_image["bboxes"]
+
+        # Handle numpy array case
+        if isinstance(bboxes, np.ndarray):
+            pred[i]["bboxes"] = bbox_xyxy_to_xywh(bboxes)
+        # Handle torch.Tensor case
+        elif torch.is_tensor(bboxes):
+            pred[i]["bboxes"] = bbox_xyxy_to_xywh(bboxes.detach().numpy())
+        else:
+            raise TypeError(f"Unsupported bbox type: {type(bboxes)}. Expected numpy.ndarray or torch.Tensor")
+
     return pred
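A minimal sketch of the conversion behavior, assuming `bbox_xyxy_to_xywh` does what its name says (the sample prediction is made up):

    import numpy as np

    pred = [{"bboxes": np.array([[10.0, 20.0, 30.0, 60.0]]), "labels": np.array([0])}]
    pred = convert_pred_to_xywh(pred)
    # [x1, y1, x2, y2] = [10, 20, 30, 60] -> [x, y, w, h] = [10, 20, 20, 40]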
autogluon/multimodal/version.py
CHANGED

{autogluon.multimodal-1.1.2b20241106.dist-info → autogluon.multimodal-1.1.2b20241108.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.multimodal
-Version: 1.1.2b20241106
+Version: 1.1.2b20241108
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -41,10 +41,10 @@ Requires-Dist: scikit-learn<1.5.3,>=1.4.0
 Requires-Dist: Pillow<12,>=10.0.1
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: boto3<2,>=1.10
-Requires-Dist: torch<2.
-Requires-Dist: lightning<2.
-Requires-Dist: transformers[sentencepiece]<
-Requires-Dist: accelerate<
+Requires-Dist: torch<2.6,>=2.2
+Requires-Dist: lightning<2.6,>=2.2
+Requires-Dist: transformers[sentencepiece]<5,>=4.38.0
+Requires-Dist: accelerate<1.0,>=0.32.0
 Requires-Dist: requests<3,>=2.30
 Requires-Dist: jsonschema<4.22,>=4.18
 Requires-Dist: seqeval<1.3.0,>=1.2.2
@@ -56,9 +56,9 @@ Requires-Dist: text-unidecode<1.4,>=1.3
 Requires-Dist: torchmetrics<1.3.0,>=1.2.0
 Requires-Dist: nptyping<2.5.0,>=1.4.4
 Requires-Dist: omegaconf<2.3.0,>=2.1.1
-Requires-Dist: autogluon.core[raytune]==1.1.2b20241106
-Requires-Dist: autogluon.features==1.1.2b20241106
-Requires-Dist: autogluon.common==1.1.2b20241106
+Requires-Dist: autogluon.core[raytune]==1.1.2b20241108
+Requires-Dist: autogluon.features==1.1.2b20241108
+Requires-Dist: autogluon.common==1.1.2b20241108
 Requires-Dist: pytorch-metric-learning<2.4,>=1.3.0
 Requires-Dist: nlpaug<1.2.0,>=1.1.10
 Requires-Dist: nltk<3.9,>=3.4.5
{autogluon.multimodal-1.1.2b20241106.dist-info → autogluon.multimodal-1.1.2b20241108.dist-info}/RECORD
CHANGED

@@ -1,11 +1,11 @@
-autogluon.multimodal-1.1.2b20241106-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
+autogluon.multimodal-1.1.2b20241108-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
 autogluon/multimodal/__init__.py,sha256=EuWb-QmtFBKePJw4_4Kpp9dKrabv121haYw_Oiu2jfI,238
 autogluon/multimodal/constants.py,sha256=lFA03ZQeZlp8mwuXLuMOgeByljV5wPYBjN_hi6Xc8zg,8498
-autogluon/multimodal/predictor.py,sha256=
+autogluon/multimodal/predictor.py,sha256=beV2gOcTnviYtU8UWTWdqWYTbuk5sC6Sba-pAEaFQyg,40936
 autogluon/multimodal/presets.py,sha256=VR_arn7X4eiQcGcvJVmwxDopPJGvYP1W1cBZ2AOcdJM,25882
 autogluon/multimodal/problem_types.py,sha256=JPSoV3Fg-NGQansRlyT2bPZG3iIkgWo7eB8oDoAfg90,9201
 autogluon/multimodal/registry.py,sha256=vqvONWweZP44aBo1jCvlqLdL0Agn2kyTK8uXUwagZhs,3670
-autogluon/multimodal/version.py,sha256=
+autogluon/multimodal/version.py,sha256=LF2xpVDjwJNbbPJVJI9mLUJAKTcMv21XRyuIrXDNfTI,90
 autogluon/multimodal/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/multimodal/cli/prepare_detection_dataset.py,sha256=9NCYmCUMPRWbxxbN_C7YQjMYlrAm8gbwJ3Qd-79JWH4,5218
 autogluon/multimodal/cli/voc2coco.py,sha256=LXVu9RIfOZs_1URrzO-_3Nrvb9uGEgPxCY4-B6m1coc,9605
@@ -14,14 +14,14 @@ autogluon/multimodal/configs/data/default.yaml,sha256=gsadTDJ3ZbppQ5rpA0sPhqd9Mw
 autogluon/multimodal/configs/distiller/default.yaml,sha256=DiCZYYJDEk5k03ZI-ewj9hWiKpMQbB8oqjWYavMK1wU,513
 autogluon/multimodal/configs/environment/default.yaml,sha256=Di1EohFWUoGi0bLuJfOr3bDD4ceD_ZrfDc_GJO55mpk,1235
 autogluon/multimodal/configs/matcher/default.yaml,sha256=K0ehM0uIFfKq1CeeaFcv14RBjo3khMgWKS2ymWI-V9I,218
-autogluon/multimodal/configs/model/default.yaml,sha256=
+autogluon/multimodal/configs/model/default.yaml,sha256=NgCPV01hfcG21hvmtLaiTbahTYkRi3ZmHONzsRdwjOE,8751
 autogluon/multimodal/configs/optimization/default.yaml,sha256=w4xlVS1gUK8tDXyhfDtIKabTqr_gBWSduq1y8T9CBI8,2182
 autogluon/multimodal/configs/pretrain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/multimodal/configs/pretrain/detection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/multimodal/configs/pretrain/detection/coco_detection.py,sha256=UlSwkWAkST_96RTzPZMuPuqIfv72U03-JdqwPd-NjiQ,3171
 autogluon/multimodal/configs/pretrain/detection/default_runtime.py,sha256=9hJmjxb6fIo-kbbejQlJy4ayopRFUyA_w95plhAUFDw,793
 autogluon/multimodal/configs/pretrain/detection/schedule_1x.py,sha256=VhZ8HT-ryeGW-GzxiVsDEIYf9Bw6ImOdPucFVJaN0Os,298
-autogluon/multimodal/configs/pretrain/detection/dino/dino-4scale_r50_8xb2-12e_coco.py,sha256=
+autogluon/multimodal/configs/pretrain/detection/dino/dino-4scale_r50_8xb2-12e_coco.py,sha256=r51gu03JBHrTjDXkT2CdNKTzEeWx2qxk8t-TEHgKwSQ,5919
 autogluon/multimodal/configs/pretrain/detection/dino/dino-5scale_swin-l_8xb2-12e_coco.py,sha256=VYvjELfzX2jYmsQLFvMYWeixx2LmB_LwbPmzgLAPUwo,1137
 autogluon/multimodal/configs/pretrain/detection/dino/dino-5scale_swin-l_8xb2-36e_coco.py,sha256=OL4Za_hd5IhQU8iHEAVrsFu-MnSmOPu1_WRa39i3QYA,266
 autogluon/multimodal/configs/pretrain/detection/dino/dino_swinl_tta.py,sha256=vi5rhbaT3mgycIm1W8jQ6l-KuFoL24OiiF-rdj2CTNg,68
@@ -71,7 +71,7 @@ autogluon/multimodal/learners/base.py,sha256=IUHRBzwrKqAwo9nDsqzg0rBQaFiVxfyeMnd
 autogluon/multimodal/learners/few_shot_svm.py,sha256=TXauhXr_2hWqaEwO8XhFxWRRPXDYxLpmmKYaCrxFWPM,23934
 autogluon/multimodal/learners/matching.py,sha256=gueWrqy0g9gVbyBvQOAj03JgBwbJsBXeLLtKsiTzrnU,89891
 autogluon/multimodal/learners/ner.py,sha256=0VZl_Z1O98A5mOSw8Ee8F9foaCT684DT0j1ALx-8RU4,19035
-autogluon/multimodal/learners/object_detection.py,sha256=
+autogluon/multimodal/learners/object_detection.py,sha256=NmaQWaakpnIA1owZNvGeO1B3Sk3p3ngWnNJM21EeeVM,30846
 autogluon/multimodal/learners/semantic_segmentation.py,sha256=cy2ALYTtqeSnPo75htgr5STZ_utgkzeGxp5j4J1mScc,20183
 autogluon/multimodal/models/__init__.py,sha256=wynO5U5zY_vElZPGL10Oqb7OVkj2iovqzml22MRL3iE,842
 autogluon/multimodal/models/adaptation_layers.py,sha256=NuzwU_ghk8D2axmDuD8UEZ_HamoMSCcKMV9DB1AYWAg,38425
@@ -109,7 +109,7 @@ autogluon/multimodal/optimization/losses.py,sha256=n1nXpXgGmYfLv0b-51yDFp99szy3j
 autogluon/multimodal/optimization/lr_scheduler.py,sha256=i3GG7T8ZyPXyS7feUVe7W3o6eSLIh_Ei7XujJL50uxw,5829
 autogluon/multimodal/optimization/semantic_seg_metrics.py,sha256=tIbSk3iyBRRx7HnZdqIxltRBtDiBt-GX_zBxkMOFxQg,32894
 autogluon/multimodal/optimization/utils.py,sha256=X6UknHcN2XID9WO2N2Of3-7MbWUfZO7ydNOktwwtbiU,34415
-autogluon/multimodal/utils/__init__.py,sha256=
+autogluon/multimodal/utils/__init__.py,sha256=GQiubE17Z5FVG7o5kZgwekmTHkYd6aL1MliZF21kbFI,2886
 autogluon/multimodal/utils/cache.py,sha256=XiLB5xNUYklDc8R9M-2RUD0Y6NEqrZIZx6O1PpRIXxI,7766
 autogluon/multimodal/utils/checkpoint.py,sha256=Khx4KXqF0S9Aw193kyUWNvJX2XkFv6y4IGYe9-txLJE,7699
 autogluon/multimodal/utils/cloud_io.py,sha256=FhIJ_oEerJ8QOZz82SzP3tg-Z1mo_vHqlmcShHe077s,3031
@@ -120,7 +120,7 @@ autogluon/multimodal/utils/distillation.py,sha256=VYSY_excOESa84Q0w6IbV4wL_ER27W
 autogluon/multimodal/utils/download.py,sha256=eOCw4b_EHBvHB9EcGesyzTm1f2AUrzz8KcZQ6j_D7-Y,10364
 autogluon/multimodal/utils/environment.py,sha256=J1YYBcIL-YzAnoN5GC1DMF_Rpt0DxSpJp3NZxJZ_q6g,12814
 autogluon/multimodal/utils/export.py,sha256=h7PizWsMaxMnlY7ssRNJxbExgt7B4XFkKY8hf7M-j4Y,11964
-autogluon/multimodal/utils/hpo.py,sha256=
+autogluon/multimodal/utils/hpo.py,sha256=KIpO7DmvowT-xua78yK5akKOFrJFKDLQajwaFkX0WWU,8813
 autogluon/multimodal/utils/inference.py,sha256=VQAda55sf6rbuuxUS3MGJXyCAgb_xugLv1glCV2NlZk,12349
 autogluon/multimodal/utils/label_studio.py,sha256=7lFl75zztIy6VCuCbyZkN-BLbtr0j1S4F42zJteGVYY,13437
 autogluon/multimodal/utils/load.py,sha256=rzfADn6obbZL20QZc034IPhIiza7SA8f5MPpd9hPsAE,5106
@@ -131,15 +131,15 @@ autogluon/multimodal/utils/misc.py,sha256=WaDWN-6xCCL4tCkxMr4VMb5oiNmmBLrWo5FC3b
 autogluon/multimodal/utils/mmcv.py,sha256=Jjg5PiPqiRNJk6yWkQQlNiqT7qhStN94QjqQsZO3uVw,922
 autogluon/multimodal/utils/model.py,sha256=Z_9bev8nRk92cUZjPggVAM3R3CHFlecU-YzjkMGPUsE,21963
 autogluon/multimodal/utils/nlpaug.py,sha256=rWztiOZDbtEGBdyjkXZ0DoSEpXGKX9akpDpFnz4xIMw,2557
-autogluon/multimodal/utils/object_detection.py,sha256=
+autogluon/multimodal/utils/object_detection.py,sha256=fHZxon5LoYRmNu_7jm_pDjesVxTa72nzZwgwP-5Fft8,53535
 autogluon/multimodal/utils/onnx.py,sha256=rblWnphKTsfbosbieJu8PsH6SMDw4on9BS8bR1plL2U,5607
 autogluon/multimodal/utils/save.py,sha256=zYIO3mYMGBvHfZcmCUaLpsQa14nVq1LPv2F76uaz89w,3951
 autogluon/multimodal/utils/visualizer.py,sha256=qAc4_36r3MBDPq1R1jBeb_gcfzIrsylL1S31sRf3wuw,22562
-autogluon.multimodal-1.1.2b20241106.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
-autogluon.multimodal-1.1.2b20241106.dist-info/METADATA,sha256=
-autogluon.multimodal-1.1.2b20241106.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
-autogluon.multimodal-1.1.2b20241106.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
-autogluon.multimodal-1.1.2b20241106.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.multimodal-1.1.2b20241106.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.multimodal-1.1.2b20241106.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-autogluon.multimodal-1.1.2b20241106.dist-info/RECORD,,
+autogluon.multimodal-1.1.2b20241108.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+autogluon.multimodal-1.1.2b20241108.dist-info/METADATA,sha256=6I1NONkJaNxI-zUF9uUZZChGOW6UwDAaFvuhej3tF0E,12880
+autogluon.multimodal-1.1.2b20241108.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+autogluon.multimodal-1.1.2b20241108.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+autogluon.multimodal-1.1.2b20241108.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.multimodal-1.1.2b20241108.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.multimodal-1.1.2b20241108.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+autogluon.multimodal-1.1.2b20241108.dist-info/RECORD,,