autogluon.multimodal 1.1.2b20240929__py3-none-any.whl → 1.1.2b20241004__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
@@ -1477,7 +1477,7 @@ class BaseLearner(ExportMixin, DistillationMixin, RealtimeMixin):
  "state_dict": {"model." + name: param for name, param in self._model.state_dict().items()}
  }

- torch.save(checkpoint, os.path.join(save_path, MODEL_CHECKPOINT))
+ torch.save(checkpoint, os.path.join(save_path, MODEL_CHECKPOINT)) # nosec B614

  if clean_ckpts:
  # clean old checkpoints + the intermediate files stored
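Every change to the Python sources in this diff follows the same pattern: an inline `# nosec B614` marker is appended to `torch.save` and `torch.load` calls so that Bandit's pickle-serialization check is suppressed at those specific call sites. A minimal sketch of the pattern (hypothetical file name, not AutoGluon code):

    # Minimal sketch, assuming Bandit is run over the code base. B614 is the check that
    # flags torch.save/torch.load because they serialize via pickle; the trailing
    # "# nosec B614" comment silences that single finding without disabling the check globally.
    import torch

    checkpoint = {"state_dict": {"weight": torch.zeros(1)}}
    torch.save(checkpoint, "model.ckpt")  # nosec B614  (trusted, locally produced file)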
@@ -2112,9 +2112,9 @@ class BaseLearner(ExportMixin, DistillationMixin, RealtimeMixin):

  convert_zero_checkpoint_to_fp32_state_dict(path + "-dir", path)
  shutil.rmtree(path + "-dir")
- state_dict = torch.load(path, map_location=torch.device("cpu"))["state_dict"]
+ state_dict = torch.load(path, map_location=torch.device("cpu"))["state_dict"] # nosec B614
  else:
- state_dict = torch.load(path, map_location=torch.device("cpu"))["state_dict"]
+ state_dict = torch.load(path, map_location=torch.device("cpu"))["state_dict"] # nosec B614
  state_dict = {k.partition(prefix)[2]: v for k, v in state_dict.items() if k.startswith(prefix)}

  # Some buffers like `position_ids` are registered as persistent=False since transformers 4.31.0
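The `torch.load` call sites receive the same suppression rather than a behavioral change. For comparison only (this is not what the diff does), PyTorch 1.13+ also offers a stricter loading mode that restricts unpickling to plain tensors and containers:

    # Hedged sketch of the stricter alternative; weights_only requires PyTorch >= 1.13
    # and rejects checkpoints that pickle arbitrary Python objects.
    import torch

    state_dict = torch.load("model.ckpt", map_location=torch.device("cpu"), weights_only=True)["state_dict"]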
@@ -2222,7 +2222,7 @@ class BaseLearner(ExportMixin, DistillationMixin, RealtimeMixin):

  if save_model:
  checkpoint = {"state_dict": {"model." + name: param for name, param in model.state_dict().items()}}
- torch.save(checkpoint, os.path.join(os.path.abspath(path), MODEL_CHECKPOINT))
+ torch.save(checkpoint, os.path.join(os.path.abspath(path), MODEL_CHECKPOINT)) # nosec B614

  @staticmethod
  def _load_metadata(
@@ -1057,7 +1057,7 @@ class MultiModalMatcher(BaseLearner):
  )

  checkpoint = {"state_dict": task.state_dict()}
- torch.save(checkpoint, os.path.join(save_path, MODEL_CHECKPOINT))
+ torch.save(checkpoint, os.path.join(save_path, MODEL_CHECKPOINT)) # nosec B614

  if clean_ckpts:
  # clean old checkpoints + the intermediate files stored
@@ -1841,7 +1841,7 @@ class MultiModalMatcher(BaseLearner):
  response_prefix: str = "response_model.",
  ):
  if state_dict is None:
- state_dict = torch.load(path, map_location=torch.device("cpu"))["state_dict"]
+ state_dict = torch.load(path, map_location=torch.device("cpu"))["state_dict"] # nosec B614
  query_state_dict = {
  k.partition(query_prefix)[2]: v for k, v in state_dict.items() if k.startswith(query_prefix)
  }
@@ -1986,7 +1986,7 @@ class MultiModalMatcher(BaseLearner):
  response_model=self._response_model,
  )
  checkpoint = {"state_dict": task.state_dict()}
- torch.save(checkpoint, os.path.join(path, MODEL_CHECKPOINT))
+ torch.save(checkpoint, os.path.join(path, MODEL_CHECKPOINT)) # nosec B614

  @staticmethod
  def _load_metadata(
@@ -609,12 +609,12 @@ class FT_Transformer(nn.Module):

  if pretrained and checkpoint_name:
  if os.path.exists(checkpoint_name):
- ckpt = torch.load(checkpoint_name)
+ ckpt = torch.load(checkpoint_name) # nosec B614
  else:
  with tempfile.TemporaryDirectory() as tmpdirname:
  checkpoint_path = os.path.join(tmpdirname, "./ft_transformer_pretrained.ckpt")
  download(checkpoint_name, checkpoint_path)
- ckpt = torch.load(checkpoint_path)
+ ckpt = torch.load(checkpoint_path) # nosec B614
  self.transformer.load_state_dict(ckpt["state_dict"])

  self.name_to_id = self.get_layer_ids()
@@ -169,7 +169,7 @@ class MMDetAutoModelForObjectDetection(nn.Module):
  if not save_path:
  save_path = f"./{self.checkpoint_name}_autogluon.pth"

- torch.save({"state_dict": self.model.state_dict(), "meta": {"CLASSES": self.model.CLASSES}}, save_path)
+ torch.save({"state_dict": self.model.state_dict(), "meta": {"CLASSES": self.model.CLASSES}}, save_path) # nosec B614

  def _save_configs(self, save_path=None):
  if not save_path:
@@ -500,7 +500,7 @@ class MMDetAutoModelForObjectDetection(nn.Module):
  """
  sd = source_path

- model_dict = torch.load(sd, map_location=torch.device("cpu"))
+ model_dict = torch.load(sd, map_location=torch.device("cpu")) # nosec B614
  if "state_dict" in model_dict:
  model_dict = model_dict["state_dict"]
  if "model" in model_dict:
@@ -617,6 +617,6 @@ class MMDetAutoModelForObjectDetection(nn.Module):
  data = {"state_dict": new_dict}

  target_directory = os.path.splitext(sd)[0] + f"_cvt.pth"
- torch.save(data, target_directory)
+ torch.save(data, target_directory) # nosec B614

  return target_directory
@@ -294,7 +294,7 @@ class TimmAutoModelForImagePrediction(nn.Module):

  def save(self, save_path: str = "./", tokenizers: Optional[dict] = None):
  weights_path = f"{save_path}/pytorch_model.bin"
- torch.save(self.model.state_dict(), weights_path)
+ torch.save(self.model.state_dict(), weights_path) # nosec B614
  logger.info(f"Model {self.prefix} weights saved to {weights_path}.")
  config_path = f"{save_path}/config.json"
  self.dump_config(config_path)
@@ -92,10 +92,10 @@ class DDPPredictionWriter(BasePredictionWriter):
  """
  # this will create N (num processes) files in `cache_dir` each containing
  # the predictions of its respective rank
- torch.save(predictions, self.get_predictions_cache_dir(trainer.global_rank))
+ torch.save(predictions, self.get_predictions_cache_dir(trainer.global_rank)) # nosec B614
  # here we save `batch_indices` to get the information about the data index
  # from prediction data
- torch.save(batch_indices, self.get_batch_indices_cache_dir(trainer.global_rank))
+ torch.save(batch_indices, self.get_batch_indices_cache_dir(trainer.global_rank)) # nosec B614

  def read_single_gpu_results(self, global_rank: Optional[int]):
  """
@@ -109,8 +109,8 @@ class DDPPredictionWriter(BasePredictionWriter):
  while (not os.path.exists(sample_indices_file)) or (not os.path.exists(predictions_file)):
  logger.info(f"waiting for rank #{global_rank} to finish saving predictions...")
  time.sleep(self.sleep_time)
- sample_indices = torch.load(sample_indices_file)
- predictions = torch.load(predictions_file)
+ sample_indices = torch.load(sample_indices_file) # nosec B614
+ predictions = torch.load(predictions_file) # nosec B614

  return sample_indices, predictions

@@ -40,9 +40,9 @@ def average_checkpoints(

  convert_zero_checkpoint_to_fp32_state_dict(per_path + "-dir", per_path)
  shutil.rmtree(per_path + "-dir")
- state_dict = torch.load(per_path, map_location=torch.device("cpu"))["state_dict"]
+ state_dict = torch.load(per_path, map_location=torch.device("cpu"))["state_dict"] # nosec B614
  else:
- state_dict = torch.load(per_path, map_location=torch.device("cpu"))["state_dict"]
+ state_dict = torch.load(per_path, map_location=torch.device("cpu"))["state_dict"] # nosec B614
  for k, v in state_dict.items():
  if k not in avg_state_dict:
  avg_state_dict[k] = v.clone().to(dtype=torch.float64)
@@ -60,7 +60,7 @@ def average_checkpoints(
  for k in avg_state_dict:
  avg_state_dict[k].clamp_(float32_info.min, float32_info.max).to(dtype=torch.float32)
  else:
- avg_state_dict = torch.load(checkpoint_paths[0], map_location=torch.device("cpu"))["state_dict"]
+ avg_state_dict = torch.load(checkpoint_paths[0], map_location=torch.device("cpu"))["state_dict"] # nosec B614

  return avg_state_dict

@@ -48,7 +48,7 @@ def _load(
  """
  if not isinstance(path_or_url, (str, Path)):
  # any sort of BytesIO or similar
- return torch.load(path_or_url, map_location=map_location)
+ return torch.load(path_or_url, map_location=map_location) # nosec B614
  if str(path_or_url).startswith("http"):
  return torch.hub.load_state_dict_from_url(
  str(path_or_url),
@@ -56,7 +56,7 @@ def _load(
  )
  fs = get_filesystem(path_or_url)
  with fs.open(path_or_url, "rb") as f:
- return torch.load(f, map_location=map_location)
+ return torch.load(f, map_location=map_location) # nosec B614


  def get_filesystem(path: _PATH, **kwargs: Any) -> AbstractFileSystem:
@@ -75,6 +75,6 @@ def _atomic_save(checkpoint: Dict[str, Any], filepath: Union[str, Path]) -> None
  This points to the file that the checkpoint will be stored in.
  """
  bytesbuffer = io.BytesIO()
- torch.save(checkpoint, bytesbuffer)
+ torch.save(checkpoint, bytesbuffer) # nosec B614
  with fsspec.open(filepath, "wb") as f:
  f.write(bytesbuffer.getvalue())
@@ -1,3 +1,3 @@
  """This is the autogluon version file."""
- __version__ = '1.1.2b20240929'
+ __version__ = '1.1.2b20241004'
  __lite__ = False
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: autogluon.multimodal
- Version: 1.1.2b20240929
+ Version: 1.1.2b20241004
  Summary: Fast and Accurate ML in 3 Lines of Code
  Home-page: https://github.com/autogluon/autogluon
  Author: AutoGluon Community
@@ -24,7 +24,6 @@ Classifier: Operating System :: Microsoft :: Windows
  Classifier: Operating System :: POSIX
  Classifier: Operating System :: Unix
  Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
@@ -32,7 +31,7 @@ Classifier: Topic :: Software Development
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Classifier: Topic :: Scientific/Engineering :: Information Analysis
  Classifier: Topic :: Scientific/Engineering :: Image Recognition
- Requires-Python: >=3.8, <3.12
+ Requires-Python: >=3.9, <3.12
  Description-Content-Type: text/markdown
  Requires-Dist: numpy<1.29,>=1.21
  Requires-Dist: scipy<1.13,>=1.5.4
@@ -56,9 +55,9 @@ Requires-Dist: text-unidecode<1.4,>=1.3
  Requires-Dist: torchmetrics<1.3.0,>=1.2.0
  Requires-Dist: nptyping<2.5.0,>=1.4.4
  Requires-Dist: omegaconf<2.3.0,>=2.1.1
- Requires-Dist: autogluon.core[raytune]==1.1.2b20240929
- Requires-Dist: autogluon.features==1.1.2b20240929
- Requires-Dist: autogluon.common==1.1.2b20240929
+ Requires-Dist: autogluon.core[raytune]==1.1.2b20241004
+ Requires-Dist: autogluon.features==1.1.2b20241004
+ Requires-Dist: autogluon.common==1.1.2b20241004
  Requires-Dist: pytorch-metric-learning<2.4,>=1.3.0
  Requires-Dist: nlpaug<1.2.0,>=1.1.10
  Requires-Dist: nltk<4.0.0,>=3.4.5
@@ -1,11 +1,11 @@
- autogluon.multimodal-1.1.2b20240929-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
+ autogluon.multimodal-1.1.2b20241004-py3.8-nspkg.pth,sha256=cQGwpuGPqg1GXscIwt-7PmME1OnSpD-7ixkikJ31WAY,554
  autogluon/multimodal/__init__.py,sha256=EuWb-QmtFBKePJw4_4Kpp9dKrabv121haYw_Oiu2jfI,238
  autogluon/multimodal/constants.py,sha256=lFA03ZQeZlp8mwuXLuMOgeByljV5wPYBjN_hi6Xc8zg,8498
  autogluon/multimodal/predictor.py,sha256=VTJGcKH4Kktdm4Qq2x9oRThpfp6w_kFSjJOmQPsmB5g,40654
  autogluon/multimodal/presets.py,sha256=VR_arn7X4eiQcGcvJVmwxDopPJGvYP1W1cBZ2AOcdJM,25882
  autogluon/multimodal/problem_types.py,sha256=JPSoV3Fg-NGQansRlyT2bPZG3iIkgWo7eB8oDoAfg90,9201
  autogluon/multimodal/registry.py,sha256=vqvONWweZP44aBo1jCvlqLdL0Agn2kyTK8uXUwagZhs,3670
- autogluon/multimodal/version.py,sha256=B_5R2gJOIzlEspA0BTH-U4wf4dGyTk65yxEBY4M24oQ,90
+ autogluon/multimodal/version.py,sha256=L2hVFidey__bEnBXy5SWyeUAAzS95yqTpOPre6ATuMg,90
  autogluon/multimodal/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autogluon/multimodal/cli/prepare_detection_dataset.py,sha256=9NCYmCUMPRWbxxbN_C7YQjMYlrAm8gbwJ3Qd-79JWH4,5218
  autogluon/multimodal/cli/voc2coco.py,sha256=LXVu9RIfOZs_1URrzO-_3Nrvb9uGEgPxCY4-B6m1coc,9605
@@ -67,9 +67,9 @@ autogluon/multimodal/data/process_mmlab/process_mmdet.py,sha256=K0d3o3j3cK37ocKk
  autogluon/multimodal/data/process_mmlab/process_mmlab_base.py,sha256=961gctp4XcW_qsl0b1dg7JUxf93mINGY49HsIIYQR9Y,6698
  autogluon/multimodal/data/process_mmlab/process_mmocr.py,sha256=ZdwmU65YmRgeSGFowDUTJvUZUWI0CLsUe87EcPG_gEs,3095
  autogluon/multimodal/learners/__init__.py,sha256=fCei0O1w79sNdirFgNrtWhFIJ-XUOQ2r9D8lQaIunt8,258
- autogluon/multimodal/learners/base.py,sha256=pGCc9giLiopZRGgBwx0bUswuDvSC7ogeQscUfMAQReE,100792
+ autogluon/multimodal/learners/base.py,sha256=IUHRBzwrKqAwo9nDsqzg0rBQaFiVxfyeMndYgeBuHgk,100848
  autogluon/multimodal/learners/few_shot_svm.py,sha256=TXauhXr_2hWqaEwO8XhFxWRRPXDYxLpmmKYaCrxFWPM,23934
- autogluon/multimodal/learners/matching.py,sha256=vxKqRqe0_LYliEYepkL06CZUkzmCs7TnDTcxxp5TDCU,89849
+ autogluon/multimodal/learners/matching.py,sha256=gueWrqy0g9gVbyBvQOAj03JgBwbJsBXeLLtKsiTzrnU,89891
  autogluon/multimodal/learners/ner.py,sha256=0VZl_Z1O98A5mOSw8Ee8F9foaCT684DT0j1ALx-8RU4,19035
  autogluon/multimodal/learners/object_detection.py,sha256=JRxXaI33Um0BWGsRRKFfLy7tAfqJj2SsT_DqIL1igKo,29015
  autogluon/multimodal/learners/semantic_segmentation.py,sha256=cy2ALYTtqeSnPo75htgr5STZ_utgkzeGxp5j4J1mScc,20183
@@ -79,17 +79,17 @@ autogluon/multimodal/models/categorical_mlp.py,sha256=R4qNo2eJ2B4FKIpTE5HXJezT0Q
  autogluon/multimodal/models/clip.py,sha256=hbIV1jsomCZXg6RF6R5jDpxESlBq46hInQ2S5Y4gJBM,8875
  autogluon/multimodal/models/custom_transformer.py,sha256=jOqe6dSMsvhqagUc5abB2Nu5VUwODn_frtFVTIA5WgY,27581
  autogluon/multimodal/models/document_transformer.py,sha256=_-hcnR0qRyzTgEmCRmnzLSChq3l7dIiXT60uDSXXT_M,7114
- autogluon/multimodal/models/ft_transformer.py,sha256=09DlnmJZAL9io6h8tdYL0sSy59-1TxIjmZ7Hv3MvYHg,26346
+ autogluon/multimodal/models/ft_transformer.py,sha256=KfdMDVh9B740qeozXfPFll8ZeC8jAzsgz4LdGAgFkQc,26374
  autogluon/multimodal/models/huggingface_text.py,sha256=QPkxuU6d5V1XtjrSmPPPt89CW4_19QAMZHZC_lfAlQM,11871
  autogluon/multimodal/models/mlp.py,sha256=KZt10QjP_C9e6L0HUtGef8AqWFR2kxAXsZAH7_iK20Y,4456
- autogluon/multimodal/models/mmdet_image.py,sha256=f6kPX6_mBcOdvYKg7crHpjYHsUsDnfFr9Noixm4gWwI,27070
+ autogluon/multimodal/models/mmdet_image.py,sha256=gdgoyBXVyiXMDhnKwFPafpgxv4PhFXFt4PZ_TLsW22I,27112
  autogluon/multimodal/models/mmocr_text_detection.py,sha256=7_u5EuylPFilSbGzcF2iVEaHBaoxOKdUzA41lODAV08,3684
  autogluon/multimodal/models/mmocr_text_recognition.py,sha256=DlcAqE2TM5yFm835EBHRAR7oY3lVLpgQMpHRKhhVho8,3994
  autogluon/multimodal/models/ner_text.py,sha256=Fxba7zFiAS501ZdMxDYQIlQv2LpxIXXc4V-nQO5svhU,10205
  autogluon/multimodal/models/numerical_mlp.py,sha256=DtWsYJOpTCDHL5hTOguDTlMSy4PfT4Ch0hhTpHM0Z4s,4119
  autogluon/multimodal/models/sam.py,sha256=iXrSOMpG5ISOXALxfaVDZR8zzS2r4pdAd8RH-0jT_3M,18279
  autogluon/multimodal/models/t_few.py,sha256=Hcrkip44jfGvLVW4BSQuUimtWv92yaPEnf-S5SPT1SY,13374
- autogluon/multimodal/models/timm_image.py,sha256=z6gFUza_NrR_DJ16LynWqoYYg031aYhH3tdgLTA2Qew,11772
+ autogluon/multimodal/models/timm_image.py,sha256=CCXiMnbPgI0rXAmsPkfW_vo0Pm1zF4mZ-DawepmXLAQ,11786
  autogluon/multimodal/models/utils.py,sha256=5Mh7Wp1ojlCC49EayH--Tz7bIbmVz6N31CpSTW12nxU,31697
  autogluon/multimodal/models/custom_hf_models/modeling_sam_for_conv_lora.py,sha256=zsdXyzF29x_os6L-Kjflmwn50fo5l7dQYirAA46Ts7A,66856
  autogluon/multimodal/models/fusion/__init__.py,sha256=Fy7eEsOddtGy5L0sav0pWHDRqgukKdCPJPXzmBEM-uk,196
@@ -110,9 +110,9 @@ autogluon/multimodal/optimization/lr_scheduler.py,sha256=i3GG7T8ZyPXyS7feUVe7W3o
  autogluon/multimodal/optimization/semantic_seg_metrics.py,sha256=tIbSk3iyBRRx7HnZdqIxltRBtDiBt-GX_zBxkMOFxQg,32894
  autogluon/multimodal/optimization/utils.py,sha256=X6UknHcN2XID9WO2N2Of3-7MbWUfZO7ydNOktwwtbiU,34415
  autogluon/multimodal/utils/__init__.py,sha256=SaUQE-TwodG9NVOchXGp-Fx1CKKZe9iUKhFD_cTiI0c,2883
- autogluon/multimodal/utils/cache.py,sha256=yqIcIQMShypBbmIMCp_qfMjb-q1791BsDv83jeP4n0g,7710
- autogluon/multimodal/utils/checkpoint.py,sha256=fVgrZbdVOv9yYVVCZO8BV0dQ0slzmLPFEDnG1FtY_hs,7657
- autogluon/multimodal/utils/cloud_io.py,sha256=8yA8WNtpsLbYJTRBLaOFQAawrRSd2FTMpZO8qpOX5wA,2989
+ autogluon/multimodal/utils/cache.py,sha256=XiLB5xNUYklDc8R9M-2RUD0Y6NEqrZIZx6O1PpRIXxI,7766
+ autogluon/multimodal/utils/checkpoint.py,sha256=Khx4KXqF0S9Aw193kyUWNvJX2XkFv6y4IGYe9-txLJE,7699
+ autogluon/multimodal/utils/cloud_io.py,sha256=FhIJ_oEerJ8QOZz82SzP3tg-Z1mo_vHqlmcShHe077s,3031
  autogluon/multimodal/utils/colormap.py,sha256=DOSPCgeQXk87B2ae3iM7T0RGjrIVozvwp7RHEXzyb-4,3882
  autogluon/multimodal/utils/config.py,sha256=MpzKrzFHoaIMtxuXSOzPK_krc8n0XzQk5KD5wrrAEAs,28778
  autogluon/multimodal/utils/data.py,sha256=kNTrWyD1N3M-skWHcH2T-Wfz-MmDnbOi92UOklds6KA,23979
@@ -135,11 +135,11 @@ autogluon/multimodal/utils/object_detection.py,sha256=EjLPzmq8Ge_HAtibiY5FNOChL_
  autogluon/multimodal/utils/onnx.py,sha256=rblWnphKTsfbosbieJu8PsH6SMDw4on9BS8bR1plL2U,5607
  autogluon/multimodal/utils/save.py,sha256=zYIO3mYMGBvHfZcmCUaLpsQa14nVq1LPv2F76uaz89w,3951
  autogluon/multimodal/utils/visualizer.py,sha256=qAc4_36r3MBDPq1R1jBeb_gcfzIrsylL1S31sRf3wuw,22562
- autogluon.multimodal-1.1.2b20240929.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
- autogluon.multimodal-1.1.2b20240929.dist-info/METADATA,sha256=73n6yALqpSETFu2hHK9-wpGBben2nRhmFEqCoDYxs1g,12802
- autogluon.multimodal-1.1.2b20240929.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
- autogluon.multimodal-1.1.2b20240929.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
- autogluon.multimodal-1.1.2b20240929.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
- autogluon.multimodal-1.1.2b20240929.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
- autogluon.multimodal-1.1.2b20240929.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
- autogluon.multimodal-1.1.2b20240929.dist-info/RECORD,,
+ autogluon.multimodal-1.1.2b20241004.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+ autogluon.multimodal-1.1.2b20241004.dist-info/METADATA,sha256=ygJ_hemZ7IlF4nnDdCmhl22-cO4EprofyBC-2zy-ra4,12752
+ autogluon.multimodal-1.1.2b20241004.dist-info/NOTICE,sha256=7nPQuj8Kp-uXsU0S5so3-2dNU5EctS5hDXvvzzehd7E,114
+ autogluon.multimodal-1.1.2b20241004.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+ autogluon.multimodal-1.1.2b20241004.dist-info/namespace_packages.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+ autogluon.multimodal-1.1.2b20241004.dist-info/top_level.txt,sha256=giERA4R78OkJf2ijn5slgjURlhRPzfLr7waIcGkzYAo,10
+ autogluon.multimodal-1.1.2b20241004.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+ autogluon.multimodal-1.1.2b20241004.dist-info/RECORD,,