aurora-model 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aurora_model-0.1.0/PKG-INFO +29 -0
- aurora_model-0.1.0/aurora/__init__.py +91 -0
- aurora_model-0.1.0/aurora/bert_config/config.json +23 -0
- aurora_model-0.1.0/aurora/bert_config/tokenizer.json +1 -0
- aurora_model-0.1.0/aurora/bert_config/tokenizer_config.json +1 -0
- aurora_model-0.1.0/aurora/config.json +37 -0
- aurora_model-0.1.0/aurora/configuration_aurora.py +62 -0
- aurora_model-0.1.0/aurora/flow_loss.py +254 -0
- aurora_model-0.1.0/aurora/generation_config.json +4 -0
- aurora_model-0.1.0/aurora/modality_connector.py +266 -0
- aurora_model-0.1.0/aurora/modeling_aurora.py +636 -0
- aurora_model-0.1.0/aurora/prototype_retriever.py +205 -0
- aurora_model-0.1.0/aurora/ts_generation_mixin.py +114 -0
- aurora_model-0.1.0/aurora/util_functions.py +154 -0
- aurora_model-0.1.0/aurora/utils/__init__.py +2 -0
- aurora_model-0.1.0/aurora/utils/path_utils.py +27 -0
- aurora_model-0.1.0/aurora/vit_config/config.json +21 -0
- aurora_model-0.1.0/aurora/vit_config/preprocessor_config.json +15 -0
- aurora_model-0.1.0/aurora_model.egg-info/PKG-INFO +29 -0
- aurora_model-0.1.0/aurora_model.egg-info/SOURCES.txt +23 -0
- aurora_model-0.1.0/aurora_model.egg-info/dependency_links.txt +1 -0
- aurora_model-0.1.0/aurora_model.egg-info/requires.txt +7 -0
- aurora_model-0.1.0/aurora_model.egg-info/top_level.txt +1 -0
- aurora_model-0.1.0/setup.cfg +4 -0
- aurora_model-0.1.0/setup.py +59 -0
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: aurora-model
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Aurora: A multimodal time series prediction model based on Transformers
|
|
5
|
+
Author: Your Name
|
|
6
|
+
Author-email: your_email@example.com
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Classifier: Intended Audience :: Developers
|
|
11
|
+
Classifier: Intended Audience :: Science/Research
|
|
12
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
16
|
+
Requires-Python: >=3.10
|
|
17
|
+
Requires-Dist: torch>=2.4.0
|
|
18
|
+
Requires-Dist: torchvision>=0.19.0
|
|
19
|
+
Requires-Dist: transformers>=4.50.0
|
|
20
|
+
Requires-Dist: huggingface_hub>=0.16.0
|
|
21
|
+
Requires-Dist: numpy>=1.21.0
|
|
22
|
+
Requires-Dist: scipy>=1.7.0
|
|
23
|
+
Requires-Dist: einops>=0.8.1
|
|
24
|
+
Dynamic: author
|
|
25
|
+
Dynamic: author-email
|
|
26
|
+
Dynamic: classifier
|
|
27
|
+
Dynamic: requires-dist
|
|
28
|
+
Dynamic: requires-python
|
|
29
|
+
Dynamic: summary
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
# aurora/__init__.py
|
|
2
|
+
import torch
|
|
3
|
+
from typing import Optional
|
|
4
|
+
from huggingface_hub import hf_hub_download
|
|
5
|
+
import os # ✅ Add os module to check file extension
|
|
6
|
+
|
|
7
|
+
# Import safetensors for loading .safetensors files
|
|
8
|
+
from safetensors.torch import load_file as safetensors_load_file
|
|
9
|
+
|
|
10
|
+
# Import your core model and config classes
|
|
11
|
+
from aurora.modeling_aurora import AuroraForPrediction
|
|
12
|
+
from aurora.configuration_aurora import AuroraConfig
|
|
13
|
+
from aurora.utils.path_utils import get_package_file_path
|
|
14
|
+
|
|
15
|
+
# Hugging Face Hub repository that hosts the released Aurora weights.
# NOTE(review): hard-coded default — override via load_model(repo_id=...).
DEFAULT_REPO_ID = "DecisionIntelligence/Aurora"
# Default weights filename inside the Hub repo; .safetensors is preferred
# over pickle-based .bin/.pt because it cannot execute code on load.
DEFAULT_WEIGHTS_FILENAME = "model.safetensors"

# Package version (kept in sync with setup.py / PKG-INFO: 0.1.0).
__version__ = "0.1.0"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def load_model(
    repo_id: str = DEFAULT_REPO_ID,
    weights_filename: str = DEFAULT_WEIGHTS_FILENAME,
    cache_dir: Optional[str] = None,
    force_download: bool = False,
    device: Optional[str] = None
) -> AuroraForPrediction:
    """
    Load the Aurora model: static config from the installed package,
    weights downloaded from the Hugging Face Hub.

    Supports both .safetensors (recommended) and pickle-based
    .bin/.pt/.pth weight formats.

    Args:
        repo_id: HF Hub repository ID (e.g., "username/repo-name").
        weights_filename: Name of the weights file in the HF repo
            (default: model.safetensors).
        cache_dir: Local directory to cache the downloaded weights
            (default: HF default cache).
        force_download: Re-download the weights even if cached.
        device: Device to load the model onto (default: auto-detect cuda/cpu).

    Returns:
        AuroraForPrediction instance with weights loaded and moved to `device`.

    Raises:
        RuntimeError: If the weights file cannot be read or applied
            to the model.
    """
    # 1. The static config ships inside the installed package, not on the Hub.
    config_path = get_package_file_path("config.json")
    config = AuroraConfig.from_json_file(config_path)
    print(f"✅ Loaded static config from package: {config_path}")

    # 2. Download only the model weights from HF Hub.
    # `resume_download` is deliberately not passed: it was deprecated in
    # huggingface_hub 0.22 and is ignored/removed in newer releases
    # (downloads always resume when possible).
    weights_path = hf_hub_download(
        repo_id=repo_id,
        filename=weights_filename,
        cache_dir=cache_dir,
        force_download=force_download,
    )
    print(f"✅ Downloaded model weights from HF Hub: {weights_path}")

    # 3. Instantiate the model with the static config.
    model = AuroraForPrediction(config)

    # 4. Load weights according to the file extension.
    file_ext = os.path.splitext(weights_filename)[-1].lower()
    try:
        if file_ext == ".safetensors":
            # .safetensors: safe, fast, no pickle execution risks.
            weights = safetensors_load_file(weights_path, device="cpu")
            print("✅ Loaded weights from .safetensors file successfully!")
        else:
            # Legacy PyTorch checkpoints are pickle-based; weights_only=True
            # (supported since torch 2.0, required-safe default in 2.6)
            # blocks arbitrary-code-execution payloads in downloaded files.
            weights = torch.load(weights_path, map_location="cpu",
                                 weights_only=True)
            print("✅ Loaded weights from traditional PyTorch file successfully!")

        # strict=False tolerates partial checkpoints, but surface any
        # mismatched keys instead of silently dropping weights.
        load_result = model.load_state_dict(weights, strict=False)
        if load_result.missing_keys:
            print(f"⚠️ Keys missing from checkpoint: {load_result.missing_keys}")
        if load_result.unexpected_keys:
            print(f"⚠️ Unexpected checkpoint keys ignored: {load_result.unexpected_keys}")
        print("✅ Loaded weights into the model successfully!")
    except Exception as e:
        raise RuntimeError(f"Failed to load weights from {weights_path}: {str(e)}") from e

    # 5. Auto-detect and move the model to the target device.
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)
    print(f"✅ Model loaded successfully on device: {device}")

    return model
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
# Public API of the `aurora` package: the model class, its config,
# and the convenience loader defined above.
__all__ = ["AuroraForPrediction", "AuroraConfig", "load_model"]
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
{
|
|
2
|
+
"architectures": [
|
|
3
|
+
"BertForMaskedLM"
|
|
4
|
+
],
|
|
5
|
+
"attention_probs_dropout_prob": 0.1,
|
|
6
|
+
"gradient_checkpointing": false,
|
|
7
|
+
"hidden_act": "gelu",
|
|
8
|
+
"hidden_dropout_prob": 0.1,
|
|
9
|
+
"hidden_size": 768,
|
|
10
|
+
"initializer_range": 0.02,
|
|
11
|
+
"intermediate_size": 3072,
|
|
12
|
+
"layer_norm_eps": 1e-12,
|
|
13
|
+
"max_position_embeddings": 512,
|
|
14
|
+
"model_type": "bert",
|
|
15
|
+
"num_attention_heads": 12,
|
|
16
|
+
"num_hidden_layers": 12,
|
|
17
|
+
"pad_token_id": 0,
|
|
18
|
+
"position_embedding_type": "absolute",
|
|
19
|
+
"transformers_version": "4.6.0.dev0",
|
|
20
|
+
"type_vocab_size": 2,
|
|
21
|
+
"use_cache": true,
|
|
22
|
+
"vocab_size": 30522
|
|
23
|
+
}
|