tirex-mirror 2025.10.8__tar.gz → 2025.10.17__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {tirex_mirror-2025.10.8/src/tirex_mirror.egg-info → tirex_mirror-2025.10.17}/PKG-INFO +25 -27
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/pyproject.toml +8 -8
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/models/slstm/cell.py +3 -3
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/models/slstm/layer.py +5 -3
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/models/tirex.py +38 -35
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17/src/tirex_mirror.egg-info}/PKG-INFO +25 -27
- tirex_mirror-2025.10.17/src/tirex_mirror.egg-info/requires.txt +37 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/tests/test_jupyterlab.py +31 -7
- tirex_mirror-2025.10.8/src/tirex_mirror.egg-info/requires.txt +0 -39
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/LICENSE +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/LICENSE_MIRROR.txt +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/MANIFEST.in +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/NOTICE.txt +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/README.md +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/setup.cfg +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/__init__.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/api_adapter/__init__.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/api_adapter/forecast.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/api_adapter/gluon.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/api_adapter/hf_data.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/api_adapter/standard_adapter.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/base.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/models/__init__.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/models/patcher.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/models/slstm/block.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/util.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex_mirror.egg-info/SOURCES.txt +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex_mirror.egg-info/dependency_links.txt +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex_mirror.egg-info/top_level.txt +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/tests/test_chronos_zs.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/tests/test_forecast.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/tests/test_forecast_adapter.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/tests/test_slstm_torch_vs_cuda.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/tests/test_standard_adapter.py +0 -0
- {tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/tests/test_util_freq.py +0 -0

{tirex_mirror-2025.10.8/src/tirex_mirror.egg-info → tirex_mirror-2025.10.17}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tirex-mirror
-Version: 2025.10.8
+Version: 2025.10.17
 Summary: Unofficial mirror of NX-AI/tirex for packaging
 Author-email: Arpad Rozsas <rozsasarpi@gmail.com>
 License: NXAI COMMUNITY LICENSE AGREEMENT
@@ -63,39 +63,37 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 License-File: LICENSE_MIRROR.txt
 License-File: NOTICE.txt
-Requires-Dist: torch==2.8.0
-Requires-Dist: einops==0.8.1
-Requires-Dist: huggingface-hub==0.34.4
-Requires-Dist: numpy==2.1.2
+Requires-Dist: torch
+Requires-Dist: einops
+Requires-Dist: huggingface-hub
+Requires-Dist: numpy
 Provides-Extra: cuda
-Requires-Dist: xlstm==2.0.4; extra == "cuda"
-Requires-Dist: ninja==1.13.0; extra == "cuda"
+Requires-Dist: xlstm; extra == "cuda"
+Requires-Dist: ninja; extra == "cuda"
 Provides-Extra: notebooks
-Requires-Dist: ipykernel==6.30.1; extra == "notebooks"
-Requires-Dist: matplotlib==3.10.6; extra == "notebooks"
-Requires-Dist: pandas==2.3.1; extra == "notebooks"
-Requires-Dist: python-dotenv==1.1.1; extra == "notebooks"
+Requires-Dist: ipykernel; extra == "notebooks"
+Requires-Dist: matplotlib; extra == "notebooks"
+Requires-Dist: pandas; extra == "notebooks"
+Requires-Dist: python-dotenv; extra == "notebooks"
 Provides-Extra: gluonts
-Requires-Dist: gluonts==0.16.2; extra == "gluonts"
-Requires-Dist: pandas==2.3.1; extra == "gluonts"
+Requires-Dist: gluonts; extra == "gluonts"
+Requires-Dist: pandas; extra == "gluonts"
 Provides-Extra: hfdataset
-Requires-Dist: datasets==3.6.0; extra == "hfdataset"
+Requires-Dist: datasets; extra == "hfdataset"
 Provides-Extra: test
 Requires-Dist: fev>=0.6.0; extra == "test"
-Requires-Dist: pytest==8.4.2; extra == "test"
-Requires-Dist: requests==2.32.3; extra == "test"
+Requires-Dist: pytest; extra == "test"
 Provides-Extra: all
-Requires-Dist: xlstm==2.0.4; extra == "all"
-Requires-Dist: ninja==1.13.0; extra == "all"
-Requires-Dist: ipykernel==6.30.1; extra == "all"
-Requires-Dist: matplotlib==3.10.6; extra == "all"
-Requires-Dist: pandas==2.3.1; extra == "all"
-Requires-Dist: python-dotenv==1.1.1; extra == "all"
-Requires-Dist: gluonts==0.16.2; extra == "all"
-Requires-Dist: datasets==3.6.0; extra == "all"
-Requires-Dist: pytest==8.4.2; extra == "all"
-Requires-Dist: fev==0.6.0; extra == "all"
-Requires-Dist: requests==2.32.3; extra == "all"
+Requires-Dist: xlstm; extra == "all"
+Requires-Dist: ninja; extra == "all"
+Requires-Dist: ipykernel; extra == "all"
+Requires-Dist: matplotlib; extra == "all"
+Requires-Dist: pandas; extra == "all"
+Requires-Dist: python-dotenv; extra == "all"
+Requires-Dist: gluonts; extra == "all"
+Requires-Dist: datasets; extra == "all"
+Requires-Dist: pytest; extra == "all"
+Requires-Dist: fev>=0.6.0; extra == "all"
 Dynamic: license-file
 
 # tirex-mirror
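
The metadata change is the version bump plus unpinning every Requires-Dist entry and dropping the requests==2.32.3 requirement from the test and all extras. The same strings are visible at runtime through importlib.metadata; a minimal sketch, assuming tirex-mirror is installed in the current environment:

```python
# Minimal sketch: print the declared requirements of an installed tirex-mirror.
# Assumes the package is installed in the current environment.
from importlib.metadata import requires, version

print(version("tirex-mirror"))
for req in requires("tirex-mirror") or []:
    print(req)  # e.g. 'torch', 'einops', 'xlstm; extra == "cuda"', ...
```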

{tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/pyproject.toml

@@ -1,12 +1,12 @@
 [project]
 name = "tirex-mirror"
-version = "2025.10.8"
+version = "2025.10.17"
 description = "Unofficial mirror of NX-AI/tirex for packaging"
 readme = "README.md"
 requires-python = ">=3.11"
 classifiers = [ "Programming Language :: Python :: 3", "Operating System :: OS Independent",]
 keywords = [ "TiRex", "xLSTM", "Time Series", "Zero-shot", "Deep Learning",]
-dependencies = [ "torch==2.8.0", "einops==0.8.1", "huggingface-hub==0.34.4", "numpy==2.1.2",]
+dependencies = [ "torch", "einops", "huggingface-hub", "numpy",]
 [[project.authors]]
 name = "Arpad Rozsas"
 email = "rozsasarpi@gmail.com"
@@ -23,12 +23,12 @@ Repository = "https://github.com/rozsasarpi/tirex-mirror"
 Issues = "https://github.com/rozsasarpi/tirex-mirror/issues"
 
 [project.optional-dependencies]
-cuda = [ "xlstm==2.0.4", "ninja==1.13.0",]
-notebooks = [ "ipykernel==6.30.1", "matplotlib==3.10.6", "pandas==2.3.1", "python-dotenv==1.1.1",]
-gluonts = [ "gluonts==0.16.2", "pandas==2.3.1",]
-hfdataset = [ "datasets==3.6.0",]
-test = [ "fev>=0.6.0", "pytest==8.4.2", "requests==2.32.3",]
-all = [ "xlstm==2.0.4", "ninja==1.13.0", "ipykernel==6.30.1", "matplotlib==3.10.6", "pandas==2.3.1", "python-dotenv==1.1.1", "gluonts==0.16.2", "datasets==3.6.0", "pytest==8.4.2", "fev==0.6.0", "requests==2.32.3",]
+cuda = [ "xlstm", "ninja",]
+notebooks = [ "ipykernel", "matplotlib", "pandas", "python-dotenv",]
+gluonts = [ "gluonts", "pandas",]
+hfdataset = [ "datasets",]
+test = [ "fev>=0.6.0", "pytest",]
+all = [ "xlstm", "ninja", "ipykernel", "matplotlib", "pandas", "python-dotenv", "gluonts", "datasets", "pytest", "fev>=0.6.0",]
 
 [tool.docformatter]
 diff = false

{tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/models/slstm/cell.py

@@ -100,7 +100,7 @@ class sLSTMCell(nn.Module):
 
     def _get_input(self, x: torch.Tensor) -> torch.Tensor:
         assert x.shape[-1] == self.config.embedding_dim * self.config.num_gates, (
-            f"Input size mismatch: Expected input size {self.config.embedding_dim * self.config.num_gates}, but got {
+            f"Input size mismatch: Expected input size {self.config.embedding_dim * self.config.num_gates}, but got {x.size(-1)}."
         )
         return x.view(x.shape[0], x.shape[1], self.config.num_gates, self.config.num_heads, -1).permute(1, 0, 2, 3, 4)
 
@@ -128,7 +128,7 @@ class sLSTMCellTorch:
         states: torch.Tensor, # [4, B, H] only the first is used for recurrence!
         R: torch.Tensor, # [K, R*H, H] - K num_heads
         b: torch.Tensor, # [T*H]
-    ) -> tuple[torch.Tensor, torch.Tensor
+    ) -> tuple[torch.Tensor, torch.Tensor]:
        num_gates = 4
        num_heads = R.shape[0]
        S, B, _ = x.shape
@@ -167,7 +167,7 @@ class sLSTMCellTorch:
        iraw, fraw, zraw, oraw = torch.unbind(raw.view(raw.shape[0], 4, -1), dim=1)
 
        # Equations reference the xlstm paper on page 4: https://arxiv.org/pdf/2405.04517
-       logfplusm = m + F.logsigmoid(fraw) # eq 15
+       logfplusm = m + F.logsigmoid(torch.clamp(fraw, max=15)) # eq 15 # Clamp to avoid subnomals
        mnew = torch.where(torch.all(n == 0.0), iraw, torch.max(iraw, logfplusm)) # eq 15
        ogate = torch.sigmoid(oraw) # eq 14
        igate = torch.minimum(torch.exp(iraw - mnew), torch.ones_like(iraw)) # eq 16
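
The functional change in cell.py is the clamp on the forget-gate pre-activation before logsigmoid (the other two hunks touch an assertion message and a return-type annotation). For large pre-activations logsigmoid underflows toward zero, which is what the "avoid subnormals" comment is about; a small standalone sketch of the effect:

```python
import torch
import torch.nn.functional as F

fraw = torch.tensor([5.0, 20.0, 60.0])

# logsigmoid(x) ~ -exp(-x) for large x, so the result collapses toward zero
# (and eventually into subnormal range): logsigmoid(60) is about -8.8e-27.
print(F.logsigmoid(fraw))

# Clamping the pre-activation at 15 bounds the output near logsigmoid(15),
# roughly -3.1e-7, which stays comfortably in the normal float range.
print(F.logsigmoid(torch.clamp(fraw, max=15)))
```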

{tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/models/slstm/layer.py

@@ -20,7 +20,7 @@ class sLSTMLayer(nn.Module):
         self.ogate = LinearHeadwiseExpand(in_features, num_heads)
 
         self.slstm_cell = sLSTMCell(self.config, backend)
-        self.group_norm = MultiHeadLayerNorm(ndim=in_features)
+        self.group_norm = MultiHeadLayerNorm(ndim=in_features, num_heads=num_heads)
 
     def forward(self, x: torch.Tensor, slstm_state: torch.Tensor | None = None) -> torch.Tensor:
         x_g = torch.cat((self.fgate(x), self.igate(x), self.zgate(x), self.ogate(x)), dim=-1)
@@ -50,18 +50,20 @@ class LinearHeadwiseExpand(nn.Module):
 
 
 class MultiHeadLayerNorm(nn.Module):
-    def __init__(self, ndim: int):
+    def __init__(self, ndim: int, num_heads: int):
         super().__init__()
         self.weight = nn.Parameter(torch.zeros(ndim))
+        self.num_heads = num_heads
 
     def forward(self, input: torch.Tensor) -> torch.Tensor:
         assert input.dim() == 4, "Input must be 4D tensor (B, NH, S, DH)"
         B, NH, S, DH = input.shape
 
+        assert NH == self.num_heads
         gn_in_1 = input.transpose(1, 2) # (B, S, NH, DH)
         gn_in_2 = gn_in_1.reshape(B * S, NH * DH) # (B * S, NH * DH)
         residual_weight = 1.0 + self.weight
-        out = F.group_norm(gn_in_2, num_groups=
+        out = F.group_norm(gn_in_2, num_groups=self.num_heads, weight=residual_weight)
         # (B * S), (NH * DH) -> (B, S, NH, DH) -> (B, NH, S, DH)
         out = out.view(B, S, NH, DH).transpose(1, 2)
         return out
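
The layer.py change threads num_heads into MultiHeadLayerNorm so that the num_groups passed to F.group_norm matches the number of heads, i.e. each head is normalized as its own group. A self-contained sketch of that head-wise normalization (shapes invented for illustration):

```python
import torch
import torch.nn.functional as F

B, NH, S, DH = 2, 4, 6, 16  # batch, heads, sequence, per-head dim (made up)
x = torch.randn(B, NH, S, DH)

# Same reshaping as MultiHeadLayerNorm.forward: fold batch and sequence,
# concatenate the heads, then normalize with one group per head.
flat = x.transpose(1, 2).reshape(B * S, NH * DH)
out = F.group_norm(flat, num_groups=NH)
out = out.view(B, S, NH, DH).transpose(1, 2)  # back to (B, NH, S, DH)

# Each (sample, head, position) slice is now roughly zero-mean, unit-variance.
print(out[0, 0, 0].mean().item(), out[0, 0, 0].std(unbiased=False).item())
```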

{tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/src/tirex/models/tirex.py

@@ -79,12 +79,18 @@ class TiRexZero(nn.Module, PretrainedModel, ForecastModel):
         training_quantile_levels = self.config.quantiles
 
         if set(quantile_levels).issubset(set(training_quantile_levels)):
-
+            quantile_indices = torch.tensor(
+                [training_quantile_levels.index(q) for q in quantile_levels],
+                dtype=torch.long,
+                device=predictions.device,
+            )
+            quantiles = torch.index_select(predictions, dim=-1, index=quantile_indices)
         else:
             quantiles = self._interpolate_quantiles(predictions, quantile_levels)
 
         # median as mean
-
+        median_idx = torch.tensor([training_quantile_levels.index(0.5)], dtype=torch.long, device=predictions.device)
+        mean = torch.index_select(predictions, dim=-1, index=median_idx).squeeze(-1)
         return quantiles, mean
 
     @torch.inference_mode()
@@ -105,24 +111,8 @@ class TiRexZero(nn.Module, PretrainedModel, ForecastModel):
 
         context = context.to(dtype=torch.float32)
         while remaining > 0:
-            if context.shape[-1] > max_context:
-                context = context[..., -max_context:]
-            if context.shape[-1] < min_context:
-                pad = torch.full(
-                    (context.shape[0], min_context - context.shape[-1]),
-                    fill_value=torch.nan,
-                    device=context.device,
-                    dtype=context.dtype,
-                )
-                context = torch.concat((pad, context), dim=1)
-            tokenized_tensor, tokenizer_state = self.tokenizer.context_input_transform(context)
             fut_rollouts = min(remaining, max_accelerated_rollout_steps)
-
-            prediction, _ = self._forward_model_tokenized(input_token=tokenized_tensor, rollouts=fut_rollouts)
-            prediction = prediction[:, :, -fut_rollouts:, :].to(tokenized_tensor) # predicted token
-            # [bs, num_quantiles, num_predicted_token, output_patch_size]
-            prediction = self.tokenizer.output_transform(prediction, tokenizer_state)
-            prediction = prediction.flatten(start_dim=2)
+            prediction, fut_rollouts = self._forecast_single_step(context, max_context, min_context, fut_rollouts)
 
             predictions.append(prediction)
             remaining -= fut_rollouts
@@ -134,6 +124,33 @@ class TiRexZero(nn.Module, PretrainedModel, ForecastModel):
 
         return torch.cat(predictions, dim=-1)[..., :prediction_length].to(dtype=torch.float32)
 
+    def _forecast_single_step(
+        self,
+        context: torch.Tensor,
+        max_context: int,
+        min_context: int,
+        new_patch_count: int = 1,
+    ) -> tuple[torch.Tensor, int]:
+        if context.shape[-1] > max_context:
+            context = context[..., -max_context:]
+        if context.shape[-1] < min_context:
+            pad = torch.full(
+                (context.shape[0], min_context - context.shape[-1]),
+                fill_value=torch.nan,
+                device=context.device,
+                dtype=context.dtype,
+            )
+            context = torch.concat((pad, context), dim=1)
+
+        tokenized_tensor, tokenizer_state = self.tokenizer.context_input_transform(context)
+        prediction, _ = self._forward_model_tokenized(input_token=tokenized_tensor, rollouts=new_patch_count)
+        prediction = prediction[:, :, -new_patch_count:, :].to(tokenized_tensor) # predicted token
+        # Shape: [bs, num_quantiles, num_predicted_token, output_patch_size]
+        prediction = self.tokenizer.output_transform(prediction, tokenizer_state)
+        prediction = prediction.flatten(start_dim=2)
+
+        return prediction, new_patch_count
+
     def _forward_model_tokenized(
         self,
         input_token: torch.Tensor,
@@ -165,21 +182,7 @@ class TiRexZero(nn.Module, PretrainedModel, ForecastModel):
 
         input_token = torch.nan_to_num(input_token, nan=self.config.nan_mask_value)
 
-        hidden_states = self.
-
-        for block in self.blocks:
-            hidden_states = block(hidden_states)
-
-        hidden_states = self.out_norm(hidden_states)
-
-        quantile_preds = self.output_patch_embedding(hidden_states)
-        quantile_preds = torch.unflatten(
-            quantile_preds, -1, (len(self.config.quantiles), self.config.output_patch_size)
-        )
-        quantile_preds = torch.transpose(quantile_preds, 1, 2) # switch quantile and num_token_dimension
-        # quantile_preds: [batch_size, num_quantiles, num_token, output_patch_size]
-
-        quantile_preds = self._forward_model(torch.cat((input_token, input_mask), dim=2))
+        quantile_preds, hidden_states = self._forward_model(torch.cat((input_token, input_mask), dim=2))
 
         quantile_preds = torch.unflatten(
             quantile_preds, -1, (len(self.config.quantiles), self.config.output_patch_size)
@@ -196,7 +199,7 @@ class TiRexZero(nn.Module, PretrainedModel, ForecastModel):
 
         hidden_states = self.out_norm(hidden_states)
 
-        return self.output_patch_embedding(hidden_states)
+        return self.output_patch_embedding(hidden_states), hidden_states
 
     def _interpolate_quantiles(self, predictions: torch.Tensor, quantile_levels: list[float]):
         training_quantile_levels = self.config.quantiles
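
Three things change in tirex.py: the per-step context trimming, padding, tokenization and de-tokenization move out of the rollout loop into the new _forecast_single_step helper; _forward_model now also returns the hidden states; and quantile extraction builds explicit index tensors for torch.index_select. The quantile-selection pattern in isolation, with toy shapes and levels standing in for the model's actual config:

```python
import torch

training_quantile_levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]  # toy levels
predictions = torch.randn(3, 2, len(training_quantile_levels))  # (batch, horizon, quantiles)

requested = [0.1, 0.5, 0.9]
quantile_indices = torch.tensor(
    [training_quantile_levels.index(q) for q in requested],
    dtype=torch.long,
    device=predictions.device,
)
quantiles = torch.index_select(predictions, dim=-1, index=quantile_indices)  # (3, 2, 3)

# "median as mean": the 0.5 quantile doubles as the point forecast.
median_idx = torch.tensor([training_quantile_levels.index(0.5)], dtype=torch.long)
mean = torch.index_select(predictions, dim=-1, index=median_idx).squeeze(-1)  # (3, 2)
print(quantiles.shape, mean.shape)
```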

{tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17/src/tirex_mirror.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tirex-mirror
-Version: 2025.10.8
+Version: 2025.10.17
 Summary: Unofficial mirror of NX-AI/tirex for packaging
 Author-email: Arpad Rozsas <rozsasarpi@gmail.com>
 License: NXAI COMMUNITY LICENSE AGREEMENT
@@ -63,39 +63,37 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 License-File: LICENSE_MIRROR.txt
 License-File: NOTICE.txt
-Requires-Dist: torch==2.8.0
-Requires-Dist: einops==0.8.1
-Requires-Dist: huggingface-hub==0.34.4
-Requires-Dist: numpy==2.1.2
+Requires-Dist: torch
+Requires-Dist: einops
+Requires-Dist: huggingface-hub
+Requires-Dist: numpy
 Provides-Extra: cuda
-Requires-Dist: xlstm==2.0.4; extra == "cuda"
-Requires-Dist: ninja==1.13.0; extra == "cuda"
+Requires-Dist: xlstm; extra == "cuda"
+Requires-Dist: ninja; extra == "cuda"
 Provides-Extra: notebooks
-Requires-Dist: ipykernel==6.30.1; extra == "notebooks"
-Requires-Dist: matplotlib==3.10.6; extra == "notebooks"
-Requires-Dist: pandas==2.3.1; extra == "notebooks"
-Requires-Dist: python-dotenv==1.1.1; extra == "notebooks"
+Requires-Dist: ipykernel; extra == "notebooks"
+Requires-Dist: matplotlib; extra == "notebooks"
+Requires-Dist: pandas; extra == "notebooks"
+Requires-Dist: python-dotenv; extra == "notebooks"
 Provides-Extra: gluonts
-Requires-Dist: gluonts==0.16.2; extra == "gluonts"
-Requires-Dist: pandas==2.3.1; extra == "gluonts"
+Requires-Dist: gluonts; extra == "gluonts"
+Requires-Dist: pandas; extra == "gluonts"
 Provides-Extra: hfdataset
-Requires-Dist: datasets==3.6.0; extra == "hfdataset"
+Requires-Dist: datasets; extra == "hfdataset"
 Provides-Extra: test
 Requires-Dist: fev>=0.6.0; extra == "test"
-Requires-Dist: pytest==8.4.2; extra == "test"
-Requires-Dist: requests==2.32.3; extra == "test"
+Requires-Dist: pytest; extra == "test"
 Provides-Extra: all
-Requires-Dist: xlstm==2.0.4; extra == "all"
-Requires-Dist: ninja==1.13.0; extra == "all"
-Requires-Dist: ipykernel==6.30.1; extra == "all"
-Requires-Dist: matplotlib==3.10.6; extra == "all"
-Requires-Dist: pandas==2.3.1; extra == "all"
-Requires-Dist: python-dotenv==1.1.1; extra == "all"
-Requires-Dist: gluonts==0.16.2; extra == "all"
-Requires-Dist: datasets==3.6.0; extra == "all"
-Requires-Dist: pytest==8.4.2; extra == "all"
-Requires-Dist: fev==0.6.0; extra == "all"
-Requires-Dist: requests==2.32.3; extra == "all"
+Requires-Dist: xlstm; extra == "all"
+Requires-Dist: ninja; extra == "all"
+Requires-Dist: ipykernel; extra == "all"
+Requires-Dist: matplotlib; extra == "all"
+Requires-Dist: pandas; extra == "all"
+Requires-Dist: python-dotenv; extra == "all"
+Requires-Dist: gluonts; extra == "all"
+Requires-Dist: datasets; extra == "all"
+Requires-Dist: pytest; extra == "all"
+Requires-Dist: fev>=0.6.0; extra == "all"
 Dynamic: license-file
 
 # tirex-mirror

tirex_mirror-2025.10.17/src/tirex_mirror.egg-info/requires.txt

@@ -0,0 +1,37 @@
+torch
+einops
+huggingface-hub
+numpy
+
+[all]
+xlstm
+ninja
+ipykernel
+matplotlib
+pandas
+python-dotenv
+gluonts
+datasets
+pytest
+fev>=0.6.0
+
+[cuda]
+xlstm
+ninja
+
+[gluonts]
+gluonts
+pandas
+
+[hfdataset]
+datasets
+
+[notebooks]
+ipykernel
+matplotlib
+pandas
+python-dotenv
+
+[test]
+fev>=0.6.0
+pytest

{tirex_mirror-2025.10.8 → tirex_mirror-2025.10.17}/tests/test_jupyterlab.py

@@ -1,21 +1,45 @@
 # Copyright (c) NXAI GmbH.
 # This software may be used and distributed according to the terms of the NXAI Community License Agreement.
 
-import
+import logging
+import subprocess
+
 import pytest
+import requests
 
 cpu_url = "http://localhost:8889"
 # gpu_url = "http://localhost:8888" - will be added as soon as self-hosted gpu runner is available
 
+logger = logging.getLogger(__name__)
+
+
+def _docker_container_running() -> bool:
+    """Return True if any Docker container is currently running."""
+    try:
+        result = subprocess.run(
+            ["docker", "ps", "-q"],
+            check=True,
+            text=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.DEVNULL,
+        )
+    except (FileNotFoundError, subprocess.CalledProcessError):
+        return False
+
+    return bool(result.stdout.strip())
+
+
+pytestmark = pytest.mark.skipif(
+    not _docker_container_running(),
+    reason="requires Docker with a running container exposing JupyterLab",
+)
+
+
 def test_jupyterlab_running():
     """Check that the JupyterLab instance inside the container is reachable."""
     try:
         response = requests.get(cpu_url, timeout=5) # timeout prevents hanging
-
-        print(f"✅ Connected to {cpu_url}")
-        print("Status Code:", response.status_code)
-
-        # Basic validation
+        logger.info("✅ Connected to %s, status code: %s", cpu_url, response.status_code)
         assert response.status_code in [200, 302], f"Unexpected status code: {response.status_code}"
 
     except requests.exceptions.ConnectionError:
@@ -25,4 +49,4 @@ def test_jupyterlab_running():
         pytest.fail(f"⏰ Connection to {cpu_url} timed out")
 
     except requests.exceptions.RequestException as e:
-        pytest.fail(f"⚠️ General error connecting to {cpu_url}: {e}")
+        pytest.fail(f"⚠️ General error connecting to {cpu_url}: {e}")
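
The test module now detects whether any Docker container is running and applies a module-level pytestmark, so the JupyterLab reachability check is skipped instead of failing on machines without the container; the prints are also replaced with a module logger. The skip pattern in general form, as a minimal sketch with a hypothetical service_available() probe:

```python
import pytest


def service_available() -> bool:
    # Stand-in probe; a real check would query the service (cf. "docker ps" above).
    return False


# Applies to every test in the module, mirroring the pytestmark usage above.
pytestmark = pytest.mark.skipif(
    not service_available(),
    reason="service not reachable",
)


def test_something():
    assert True  # skipped whenever service_available() returns False
```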

tirex_mirror-2025.10.8/src/tirex_mirror.egg-info/requires.txt

@@ -1,39 +0,0 @@
-torch==2.8.0
-einops==0.8.1
-huggingface-hub==0.34.4
-numpy==2.1.2
-
-[all]
-xlstm==2.0.4
-ninja==1.13.0
-ipykernel==6.30.1
-matplotlib==3.10.6
-pandas==2.3.1
-python-dotenv==1.1.1
-gluonts==0.16.2
-datasets==3.6.0
-pytest==8.4.2
-fev==0.6.0
-requests==2.32.3
-
-[cuda]
-xlstm==2.0.4
-ninja==1.13.0
-
-[gluonts]
-gluonts==0.16.2
-pandas==2.3.1
-
-[hfdataset]
-datasets==3.6.0
-
-[notebooks]
-ipykernel==6.30.1
-matplotlib==3.10.6
-pandas==2.3.1
-python-dotenv==1.1.1
-
-[test]
-fev>=0.6.0
-pytest==8.4.2
-requests==2.32.3