autogluon.multimodal-1.4.1b20250804-py3-none-any.whl → autogluon.multimodal-1.4.1b20250806-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (18)
  1. autogluon/multimodal/data/dataset_mmlab/multi_image_mix_dataset.py +1 -1
  2. autogluon/multimodal/data/process_document.py +6 -6
  3. autogluon/multimodal/data/process_mmlab/process_mmdet.py +3 -3
  4. autogluon/multimodal/data/process_mmlab/process_mmlab_base.py +3 -3
  5. autogluon/multimodal/data/process_ner.py +6 -6
  6. autogluon/multimodal/data/process_semantic_seg_img.py +8 -8
  7. autogluon/multimodal/learners/object_detection.py +11 -1
  8. autogluon/multimodal/utils/object_detection.py +5 -2
  9. autogluon/multimodal/version.py +1 -1
  10. {autogluon.multimodal-1.4.1b20250804.dist-info → autogluon.multimodal-1.4.1b20250806.dist-info}/METADATA +7 -7
  11. {autogluon.multimodal-1.4.1b20250804.dist-info → autogluon.multimodal-1.4.1b20250806.dist-info}/RECORD +18 -18
  12. /autogluon.multimodal-1.4.1b20250804-py3.9-nspkg.pth → /autogluon.multimodal-1.4.1b20250806-py3.9-nspkg.pth +0 -0
  13. {autogluon.multimodal-1.4.1b20250804.dist-info → autogluon.multimodal-1.4.1b20250806.dist-info}/LICENSE +0 -0
  14. {autogluon.multimodal-1.4.1b20250804.dist-info → autogluon.multimodal-1.4.1b20250806.dist-info}/NOTICE +0 -0
  15. {autogluon.multimodal-1.4.1b20250804.dist-info → autogluon.multimodal-1.4.1b20250806.dist-info}/WHEEL +0 -0
  16. {autogluon.multimodal-1.4.1b20250804.dist-info → autogluon.multimodal-1.4.1b20250806.dist-info}/namespace_packages.txt +0 -0
  17. {autogluon.multimodal-1.4.1b20250804.dist-info → autogluon.multimodal-1.4.1b20250806.dist-info}/top_level.txt +0 -0
  18. {autogluon.multimodal-1.4.1b20250804.dist-info → autogluon.multimodal-1.4.1b20250806.dist-info}/zip-safe +0 -0
autogluon/multimodal/data/dataset_mmlab/multi_image_mix_dataset.py
@@ -134,7 +134,7 @@ class MultiImageMixDataset(torch.utils.data.Dataset):
         per_ret = apply_data_processor(
             per_sample_features=per_sample_features,
             data_processors=per_processors_group,
-            feature_modalities=getattr(self, f"modality_types_{group_id}"),
+            data_types=getattr(self, f"modality_types_{group_id}"),
             is_training=self.is_training,
             load_only=True,
         )
autogluon/multimodal/data/process_document.py
@@ -236,7 +236,7 @@ class DocumentProcessor(ImageProcessor):
     def process_one_sample(
         self,
         document_features: Dict[str, Union[NDArray, list]],
-        feature_modalities: Dict[str, Union[NDArray, list]],
+        data_types: Dict[str, Union[NDArray, list]],
         is_training: bool,
         image_mode: Optional[str] = "RGB",
     ):
@@ -247,8 +247,8 @@ class DocumentProcessor(ImageProcessor):
         ----------
         document_features
             One sample has one document image column in a pd.DataFrame.
-        feature_modalities
-            What modality each column belongs to.
+        data_types
+            Data type of all columns.
         is_training
             Whether to process document images in the training mode.
         image_mode
@@ -263,7 +263,7 @@ class DocumentProcessor(ImageProcessor):
         for per_col_name, per_col_image_features in document_features.items():
             try:
                 # Process PDF documents.
-                if feature_modalities[per_col_name] == DOCUMENT_PDF:
+                if data_types[per_col_name] == DOCUMENT_PDF:
                     from pdf2image import convert_from_path

                     # Convert PDF to PIL images.
@@ -388,7 +388,7 @@ class DocumentProcessor(ImageProcessor):
     def __call__(
         self,
         all_features: Dict[str, Union[NDArray, list]],
-        feature_modalities: Dict[str, Union[NDArray, list]],
+        data_types: Dict[str, Union[NDArray, list]],
         is_training: bool,
     ) -> Dict:
         """
@@ -406,6 +406,6 @@ class DocumentProcessor(ImageProcessor):
         A dictionary containing one sample's features and/or labels.
         """

-        ret = self.process_one_sample(all_features, feature_modalities, is_training)
+        ret = self.process_one_sample(all_features, data_types, is_training)

         return ret
autogluon/multimodal/data/process_mmlab/process_mmdet.py
@@ -171,7 +171,7 @@ class MMDetProcessor(MMLabProcessor):
     def __call__(
         self,
         images: Dict[str, List[str]],
-        feature_modalities: Dict[str, Union[int, float, list]],
+        data_types: Dict[str, Union[int, float, list]],
         is_training: bool,
         load_only: bool = False,
     ) -> Dict:
@@ -182,8 +182,8 @@ class MMDetProcessor(MMLabProcessor):
         ----------
         images
             Images of one sample.
-        feature_modalities
-            The modality of the feature columns.
+        data_types
+            Data types of all columns.
         is_training
             Whether to process images in the training mode.
         load_only
autogluon/multimodal/data/process_mmlab/process_mmlab_base.py
@@ -171,7 +171,7 @@ class MMLabProcessor:
     def __call__(
         self,
         images: Dict[str, List[str]],
-        feature_modalities: Dict[str, Union[int, float, list]],
+        data_types: Dict[str, Union[int, float, list]],
         is_training: bool,
     ) -> Dict:
         """
@@ -181,8 +181,8 @@ class MMLabProcessor:
         ----------
         images
             Images of one sample.
-        feature_modalities
-            The modality of the feature columns.
+        data_types
+            Data type of all columns.
         is_training
             Whether to process images in the training mode.

autogluon/multimodal/data/process_ner.py
@@ -94,7 +94,7 @@ class NerProcessor:
     def process_ner(
         self,
         all_features: Dict[str, Union[int, float, list]],
-        feature_modalities: Dict[str, Union[int, float, list]],
+        data_types: Dict[str, Union[int, float, list]],
         is_training: bool,
     ) -> Dict:
         """
@@ -105,8 +105,8 @@ class NerProcessor:
         ----------
         all_features
             All features including text and ner annotations.
-        feature_modalities
-            The modality of the feature columns.
+        data_types
+            Data type of all columns.

         Returns
         -------
@@ -117,7 +117,7 @@ class NerProcessor:
         if self.max_len is not None and self.tokenizer.model_max_length > 10**6:
             self.tokenizer.model_max_length = self.max_len
         text_column, annotation_column = None, None
-        for column_name, column_modality in feature_modalities.items():
+        for column_name, column_modality in data_types.items():
             if column_modality.startswith((TEXT_NER, TEXT)):
                 text_column = column_name
             if column_modality == NER_ANNOTATION:
@@ -335,7 +335,7 @@ class NerProcessor:
     def __call__(
         self,
         all_features: Dict[str, Union[NDArray, list]],
-        feature_modalities: Dict[str, Union[NDArray, list]],
+        data_types: Dict[str, Union[NDArray, list]],
         is_training: bool,
     ) -> Dict:
         """
@@ -354,6 +354,6 @@ class NerProcessor:
         """
         ret = {}
         if self.prefix == NER_TEXT:
-            ret = self.process_ner(all_features, feature_modalities, is_training)
+            ret = self.process_ner(all_features, data_types, is_training)

         return ret
autogluon/multimodal/data/process_semantic_seg_img.py
@@ -157,7 +157,7 @@ class SemanticSegImageProcessor(ImageProcessor):
     def process_one_sample(
         self,
         image_features: Dict[str, Union[List[str], List[bytearray]]],
-        feature_modalities: Dict[str, List[str]],
+        data_types: Dict[str, List[str]],
         is_training: bool,
         image_mode: Optional[str] = "RGB",
     ) -> Dict:
@@ -169,8 +169,8 @@ class SemanticSegImageProcessor(ImageProcessor):
         image_features
             One sample may have multiple image columns in a pd.DataFrame and multiple images
             inside each image column.
-        feature_modalities
-            What modality each column belongs to.
+        data_types
+            Data type of all columns.
         is_training
             Whether to process images in the training mode.
         image_mode
@@ -190,7 +190,7 @@ class SemanticSegImageProcessor(ImageProcessor):

         ret = {}
         annotation_column = None
-        for column_name, column_modality in feature_modalities.items():
+        for column_name, column_modality in data_types.items():
             if column_modality == SEMANTIC_SEGMENTATION_IMG:
                 image_column = column_name
             if column_modality == SEMANTIC_SEGMENTATION_GT:
@@ -276,7 +276,7 @@ class SemanticSegImageProcessor(ImageProcessor):
     def __call__(
         self,
         images: Dict[str, List[str]],
-        feature_modalities: Dict[str, Union[int, float, list]],
+        data_types: Dict[str, Union[int, float, list]],
         is_training: bool,
     ) -> Dict:
         """
@@ -286,8 +286,8 @@ class SemanticSegImageProcessor(ImageProcessor):
         ----------
         images
             Images of one sample.
-        feature_modalities
-            The modality of the feature columns.
+        data_types
+            Data type of all columns.
         is_training
             Whether to process images in the training mode.

@@ -296,7 +296,7 @@ class SemanticSegImageProcessor(ImageProcessor):
         A dictionary containing one sample's processed images and their number.
         """
         images = {k: [v] if isinstance(v, str) else v for k, v in images.items()}
-        return self.process_one_sample(images, feature_modalities, is_training)
+        return self.process_one_sample(images, data_types, is_training)

     def get_train_transforms(self, train_transforms):
         train_trans = []
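The six data-processor diffs above amount to one mechanical API rename: the feature_modalities parameter becomes data_types in each processor's __call__ and its helpers, with docstrings updated to match. A minimal sketch of the new calling convention, assuming an already-constructed SemanticSegImageProcessor (seg_processor) and hypothetical column names; the constants mirror those referenced in the diff:

    from autogluon.multimodal.constants import (
        SEMANTIC_SEGMENTATION_GT,
        SEMANTIC_SEGMENTATION_IMG,
    )

    # Hedged sketch: the renamed keyword in use. Column names are illustrative.
    ret = seg_processor(
        images={"image": ["demo_0001.png"], "label": ["demo_0001_mask.png"]},
        data_types={
            "image": SEMANTIC_SEGMENTATION_IMG,  # was feature_modalities=...
            "label": SEMANTIC_SEGMENTATION_GT,   # ground-truth mask column
        },
        is_training=True,
    )

Callers that still pass feature_modalities= as a keyword will now fail with a TypeError against this build.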
autogluon/multimodal/learners/object_detection.py
@@ -711,6 +711,10 @@ class ObjectDetectionLearner(BaseLearner):
             and sample number.
         save_results
             Whether to save the prediction results (only works for detection now)
+        **kwargs
+            Additional arguments including:
+            - result_save_path (str, optional): Custom path to save results. If not provided,
+              uses default path setup.

         Returns
         -------
@@ -758,8 +762,14 @@ class ObjectDetectionLearner(BaseLearner):
                 old_save_path=self._save_path,
                 warn_if_exist=False,
             )
-            if as_coco:
+            custom_save_path = kwargs.get('result_save_path')
+            if custom_save_path:
+                result_path = custom_save_path
+            elif as_coco:
                 result_path = os.path.join(self._save_path, "result.json")
+            else:
+                result_path = os.path.join(self._save_path, "result.txt")
+            if as_coco:
                 save_result_coco_format(
                     data_path=data_path,
                     pred=pred,
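ObjectDetectionLearner.predict now honors a result_save_path keyword that overrides where results are written when save_results=True; otherwise the learner falls back to result.json (COCO format) or result.txt under its save path. A usage sketch, assuming a fitted detection MultiModalPredictor that forwards extra keyword arguments through to the learner (the data path is hypothetical):

    # Hedged sketch: route the saved detection results to a custom file.
    pred = predictor.predict(
        "data/test_coco.json",                    # hypothetical test data
        save_results=True,
        result_save_path="/tmp/detections.json",  # new in this build
    )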
autogluon/multimodal/utils/object_detection.py
@@ -990,6 +990,9 @@ def cocoeval_pycocotools(
     coco_dataset.save_result(ret, data, cache_path)

     cocoGt = COCO(anno_file)
+    # https://github.com/ppwwyyxx/cocoapi/commit/617836ce3551927ec94e2024b18d6c899226a742#diff-51af02f519555c402db0216fd229e4fcb51fe55c25f446e7af8890d73269c7bdR313-R314
+    if "info" not in cocoGt.dataset:
+        cocoGt.dataset["info"] = ""
     cocoDt = cocoGt.loadRes(cache_path)
     annType = "bbox"
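The added guard works around the pycocotools change linked in the new comment: loadRes() there deep-copies dataset["info"] into the results object, so annotation files written without an info block raise a KeyError at evaluation time. A standalone sketch of the same defensive pattern (file names hypothetical):

    from pycocotools.coco import COCO

    cocoGt = COCO("instances_test.json")  # hypothetical ground-truth annotations
    # Backfill the optional "info" block before loadRes() tries to copy it.
    if "info" not in cocoGt.dataset:
        cocoGt.dataset["info"] = ""
    cocoDt = cocoGt.loadRes("predictions.json")  # hypothetical detection results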
 
@@ -1607,14 +1610,14 @@ def save_result_coco_format(data_path, pred, category_ids, result_path, coco_root
     result_name, _ = os.path.splitext(result_path)
     result_path = result_name + ".json"
     coco_dataset.save_result(pred, from_coco_or_voc(data_path, "test", coco_root=coco_root), save_path=result_path)
-    logger.info(25, f"Saved detection result to {result_path}")
+    logger.info(f"Saved detection result to {result_path}")


 def save_result_voc_format(pred, result_path):
     result_name, _ = os.path.splitext(result_path)
     result_path = result_name + ".npy"
     np.save(result_path, pred)
-    logger.info(25, f"Saved detection result to {result_path}")
+    logger.info(f"Saved detection result to {result_path}")


 def convert_pred_to_xywh(pred: Optional[List]) -> Optional[List]:
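The removed logger.info(25, ...) calls were bugs: logging.Logger.info(msg, *args) has no level parameter, so 25 was taken as the message and the f-string as an unused %-format argument, producing a logging error instead of the intended message. The fix simply drops the stray argument; if the intent was a custom level 25, logger.log would preserve it. A minimal sketch:

    import logging

    logger = logging.getLogger(__name__)
    result_path = "result.json"  # hypothetical

    logger.info("Saved detection result to %s", result_path)     # what the fix does
    logger.log(25, "Saved detection result to %s", result_path)  # explicit custom level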
autogluon/multimodal/version.py
@@ -1,4 +1,4 @@
 """This is the autogluon version file."""

-__version__ = "1.4.1b20250804"
+__version__ = "1.4.1b20250806"
 __lite__ = False
{autogluon.multimodal-1.4.1b20250804.dist-info → autogluon.multimodal-1.4.1b20250806.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: autogluon.multimodal
-Version: 1.4.1b20250804
+Version: 1.4.1b20250806
 Summary: Fast and Accurate ML in 3 Lines of Code
 Home-page: https://github.com/autogluon/autogluon
 Author: AutoGluon Community
@@ -43,8 +43,8 @@ Requires-Dist: scikit-learn<1.8.0,>=1.4.0
 Requires-Dist: Pillow<12,>=10.0.1
 Requires-Dist: tqdm<5,>=4.38
 Requires-Dist: boto3<2,>=1.10
-Requires-Dist: torch<2.8,>=2.2
-Requires-Dist: lightning<2.8,>=2.2
+Requires-Dist: torch<2.8,>=2.6
+Requires-Dist: lightning<2.8,>=2.5.1
 Requires-Dist: transformers[sentencepiece]<4.50,>=4.38.0
 Requires-Dist: accelerate<2.0,>=0.34.0
 Requires-Dist: fsspec[http]<=2025.3
@@ -53,14 +53,14 @@ Requires-Dist: jsonschema<4.24,>=4.18
 Requires-Dist: seqeval<1.3.0,>=1.2.2
 Requires-Dist: evaluate<0.5.0,>=0.4.0
 Requires-Dist: timm<1.0.7,>=0.9.5
-Requires-Dist: torchvision<0.23.0,>=0.16.0
+Requires-Dist: torchvision<0.23.0,>=0.21.0
 Requires-Dist: scikit-image<0.26.0,>=0.19.1
 Requires-Dist: text-unidecode<1.4,>=1.3
 Requires-Dist: torchmetrics<1.8,>=1.2.0
 Requires-Dist: omegaconf<2.4.0,>=2.1.1
-Requires-Dist: autogluon.core[raytune]==1.4.1b20250804
-Requires-Dist: autogluon.features==1.4.1b20250804
-Requires-Dist: autogluon.common==1.4.1b20250804
+Requires-Dist: autogluon.core[raytune]==1.4.1b20250806
+Requires-Dist: autogluon.features==1.4.1b20250806
+Requires-Dist: autogluon.common==1.4.1b20250806
 Requires-Dist: pytorch-metric-learning<2.9,>=1.3.0
 Requires-Dist: nlpaug<1.2.0,>=1.1.10
 Requires-Dist: nltk<3.10,>=3.4.5
{autogluon.multimodal-1.4.1b20250804.dist-info → autogluon.multimodal-1.4.1b20250806.dist-info}/RECORD
@@ -1,8 +1,8 @@
-autogluon.multimodal-1.4.1b20250804-py3.9-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
+autogluon.multimodal-1.4.1b20250806-py3.9-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
 autogluon/multimodal/__init__.py,sha256=6WuDt3sMP1QLJs9se-20mmHCsFR-q2ZttPc8s0k1QWA,204
 autogluon/multimodal/constants.py,sha256=eLF3t-447nw3aNrYPh4Y8pycdECGu__wv6TC-amfXw8,9509
 autogluon/multimodal/predictor.py,sha256=4lou5yGysY1O86A-PX8AgsJvjB5bq2eHJ9zB1DyZFew,42847
-autogluon/multimodal/version.py,sha256=-VcYcuqz0tj--f0asYTM3jgAZQFCP6FL7XEfvZ1uIVM,91
+autogluon/multimodal/version.py,sha256=FExUXdidHlJIW0FUwXAosP_z3-r9rAGcWmM40IGvRAg,91
 autogluon/multimodal/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autogluon/multimodal/cli/prepare_detection_dataset.py,sha256=9NCYmCUMPRWbxxbN_C7YQjMYlrAm8gbwJ3Qd-79JWH4,5218
 autogluon/multimodal/cli/voc2coco.py,sha256=LXVu9RIfOZs_1URrzO-_3Nrvb9uGEgPxCY4-B6m1coc,9605
@@ -46,12 +46,12 @@ autogluon/multimodal/data/mixup.py,sha256=zYj3tgKxE25868bbBuQEiAZnch-yHR5bqJWk9c
 autogluon/multimodal/data/nlpaug.py,sha256=2Dh_Q_CL3DQtMUW_4YyHKdyIoOnPgIFMq5xA3ZMrYfY,2549
 autogluon/multimodal/data/preprocess_dataframe.py,sha256=CgdlqqkBRy0tRQj-ZHYFhE-oExJ7_wYHFuqXGTcIh7o,35801
 autogluon/multimodal/data/process_categorical.py,sha256=B4OXfMdi7cJwsR9gi0QF9s6Bm3OtmfAcokMrJyqyhME,4819
-autogluon/multimodal/data/process_document.py,sha256=OGjKCXKoOBKMJnigFo5R61n8XjJoJfv9emETrTbX1do,14825
+autogluon/multimodal/data/process_document.py,sha256=5oT9FO0ulzvES1moxuZ_KjRjIz8XZkIQ5Z45xWNu6vE,14773
 autogluon/multimodal/data/process_image.py,sha256=LN_xiN8B6NiGlra3LGMGveZURyfgqVbwuEhcF3Ljz0U,15150
 autogluon/multimodal/data/process_label.py,sha256=WnRTD4gFtlcD3LW9BL3ML3OSYHH388_4b0LOl1YYDOk,2858
-autogluon/multimodal/data/process_ner.py,sha256=AalZsFRr5fHB4s2duW3hqaSH_kxz12CgEiMV0bcT79M,13656
+autogluon/multimodal/data/process_ner.py,sha256=Azn6oc3Tymn_qxxsmnWk-9sKNujrlI0pTa8nlaqeJzg,13605
 autogluon/multimodal/data/process_numerical.py,sha256=9ttKKCxk0EybZNQTuKlRwdHbbIvR4o944oUnU6o6M6Y,4883
-autogluon/multimodal/data/process_semantic_seg_img.py,sha256=0MEoSGwDIpnX6JvzrV8ygi8vwtmc_aOINyaBVgeNJzI,11793
+autogluon/multimodal/data/process_semantic_seg_img.py,sha256=9b-3MKdT6kYVzy97E8RkgxMcB2qtbHZxZGIRAaSGkyo,11722
 autogluon/multimodal/data/process_text.py,sha256=akQqiufnWmWZbnZRqCRISWpV3pQMDH1lZbnFdIADJOs,20118
 autogluon/multimodal/data/randaug.py,sha256=iidjz4CtqWnhxtJKafr7yNoJOpmwdlYyMWHrpVoejY0,7005
 autogluon/multimodal/data/template_engine.py,sha256=r57P_eLSSkjgI5B8czow7CNxlPsqqaDdPlaMTqVlHUw,3433
@@ -59,10 +59,10 @@ autogluon/multimodal/data/templates.py,sha256=UwElnQvBE2qZtnv3-1E8nQhOmcVzcFfonR
 autogluon/multimodal/data/trivial_augmenter.py,sha256=ciYEJsJJjCEgu9TBqPO5cTjd504eqdZQwaE5uYu2bS0,9164
 autogluon/multimodal/data/utils.py,sha256=kFK_pLyaAzw6mbed_PlY-fF6pyw6Gw6Y2mTuC8hcJbk,28900
 autogluon/multimodal/data/dataset_mmlab/__init__.py,sha256=MXibqfVtAX2jjveMUtdHmSH6SabXEDrAOfZzTs3pK3Y,119
-autogluon/multimodal/data/dataset_mmlab/multi_image_mix_dataset.py,sha256=2rABeHdUo8S9Amv7wQqft80AASrfEtCDD5ixfs85jDc,32960
+autogluon/multimodal/data/dataset_mmlab/multi_image_mix_dataset.py,sha256=K_bv63dW1TZk-yXqAtA-sdq5rqunsyHVgfXcSmXfQLI,32952
 autogluon/multimodal/data/process_mmlab/__init__.py,sha256=EWrLTx1ZcBdWDDPVirBW5VXonpKqY4jSPPmqYSwJbvY,84
-autogluon/multimodal/data/process_mmlab/process_mmdet.py,sha256=wWcoeLoostH4y9oilSuJJmOT0so7hUVBR3mDo4XFIzY,6884
-autogluon/multimodal/data/process_mmlab/process_mmlab_base.py,sha256=7RorcwDoMRfLgtFEmH26uEg0csKvxyD21gdPh-9N4YM,6540
+autogluon/multimodal/data/process_mmlab/process_mmdet.py,sha256=sahxX1rlCYOqbeE0XMDq8raoF33bOXbh_UJbm8mGVtk,6858
+autogluon/multimodal/data/process_mmlab/process_mmlab_base.py,sha256=LPhF0PmM0Zf91F7D1rA0hcWGFH9U4WM9Og2lbYpELnE,6513
 autogluon/multimodal/data/process_mmlab/process_mmocr.py,sha256=dwhPDq1A84eqwZHnQMZFyg9GcnFTEkMCLBhJfhUeqQk,2913
 autogluon/multimodal/learners/__init__.py,sha256=BnOY7nwfXJ6rjPawF1mebSxB5Jx-OdiGwc_P53kNXOc,294
 autogluon/multimodal/learners/base.py,sha256=YzJnVr_FamfJa9sSEQq4n7ekSz0QFjeWuZRMVAMiUSM,99640
@@ -70,7 +70,7 @@ autogluon/multimodal/learners/ensemble.py,sha256=yJOyGdgeqKeriSCdiv809pn8AEdx6uG
 autogluon/multimodal/learners/few_shot_svm.py,sha256=XWD7uufpyemGSM9z8rIXksbHvU3YRlgQ0Vq_Wm0Sxe0,23919
 autogluon/multimodal/learners/matching.py,sha256=l0gXRAECBlO7G9_pYSaytl3RDERngMNLm7pzxUcPYwg,88428
 autogluon/multimodal/learners/ner.py,sha256=guxVA3Oc9JMQXALir0mINdcU1UcbIcSqGoywbsQkibw,18820
-autogluon/multimodal/learners/object_detection.py,sha256=HszfWyYbxKZqHgXngFkR6EbVHGjFR1BSZ4PxxVx5c6o,30911
+autogluon/multimodal/learners/object_detection.py,sha256=XIn7y_RkuFJDNOxmFy6l9S-34Y4e8jAnZ34i6NsjnLo,31363
 autogluon/multimodal/learners/semantic_segmentation.py,sha256=sO-Jl85IQlT3hrE7RAXzaYttJY65N-8D0UhMTJw6Acw,19900
 autogluon/multimodal/models/__init__.py,sha256=PWplL_fQEIKoKv3f7CSMXqZOMXQYNVUvsyt5s8MpRpk,1045
 autogluon/multimodal/models/adaptation_layers.py,sha256=NuzwU_ghk8D2axmDuD8UEZ_HamoMSCcKMV9DB1AYWAg,38425
@@ -143,7 +143,7 @@ autogluon/multimodal/utils/log.py,sha256=2NsqHauLivy8dt76BEVg33MW7FdpgQ1di21JwVk
 autogluon/multimodal/utils/matcher.py,sha256=FSLPXoaBAw3sRioHLPABls8RBtzbGJY0m46fLF4U6Ok,18300
 autogluon/multimodal/utils/misc.py,sha256=WaDWN-6xCCL4tCkxMr4VMb5oiNmmBLrWo5FC3bCQp2A,4772
 autogluon/multimodal/utils/mmcv.py,sha256=Jjg5PiPqiRNJk6yWkQQlNiqT7qhStN94QjqQsZO3uVw,922
-autogluon/multimodal/utils/object_detection.py,sha256=fHZxon5LoYRmNu_7jm_pDjesVxTa72nzZwgwP-5Fft8,53535
+autogluon/multimodal/utils/object_detection.py,sha256=1Ll8utg0FRslAtzVKtmRAIXtVSMj-OlcFFLVdEn5IhM,53769
 autogluon/multimodal/utils/onnx.py,sha256=nyj0Zy5SzK0tRw4tO-BfsHwUh48UPHxp7mVQX0JiF-c,5517
 autogluon/multimodal/utils/path.py,sha256=snyfAMZTqa_v0pJTEBX-v56zcSuX6VoqXHrqbSGriso,219
 autogluon/multimodal/utils/precision.py,sha256=vcJDPIIn9mgTmba_m4sFqGYC0AmiIXiVSTusGr4RDFo,3757
@@ -153,11 +153,11 @@ autogluon/multimodal/utils/registry.py,sha256=vqvONWweZP44aBo1jCvlqLdL0Agn2kyTK8
 autogluon/multimodal/utils/save.py,sha256=aXZa_iue34dAEfTz7nCaRowktG1emEi5uVXe_tDmHBA,4408
 autogluon/multimodal/utils/strategy.py,sha256=tT9PWh_ZLwNdGFgPsXgZsgKRhpnfBQDjh1mB1_y8G18,833
 autogluon/multimodal/utils/visualizer.py,sha256=qAc4_36r3MBDPq1R1jBeb_gcfzIrsylL1S31sRf3wuw,22562
-autogluon.multimodal-1.4.1b20250804.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
-autogluon.multimodal-1.4.1b20250804.dist-info/METADATA,sha256=sR_ghQToJw__hkimVbN_ftJ-g6yLMfK81vvQ1ki-Tv4,13275
-autogluon.multimodal-1.4.1b20250804.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
-autogluon.multimodal-1.4.1b20250804.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-autogluon.multimodal-1.4.1b20250804.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.multimodal-1.4.1b20250804.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
-autogluon.multimodal-1.4.1b20250804.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-autogluon.multimodal-1.4.1b20250804.dist-info/RECORD,,
+autogluon.multimodal-1.4.1b20250806.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+autogluon.multimodal-1.4.1b20250806.dist-info/METADATA,sha256=-MJeZX6e9L5_-cpWqBIspKowr5P340Lbr76alA4qwpE,13277
+autogluon.multimodal-1.4.1b20250806.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+autogluon.multimodal-1.4.1b20250806.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+autogluon.multimodal-1.4.1b20250806.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.multimodal-1.4.1b20250806.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+autogluon.multimodal-1.4.1b20250806.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+autogluon.multimodal-1.4.1b20250806.dist-info/RECORD,,