isage-middleware 0.2.4.3 (isage_middleware-0.2.4.3-cp311-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- isage_middleware-0.2.4.3.dist-info/METADATA +266 -0
- isage_middleware-0.2.4.3.dist-info/RECORD +94 -0
- isage_middleware-0.2.4.3.dist-info/WHEEL +5 -0
- isage_middleware-0.2.4.3.dist-info/top_level.txt +1 -0
- sage/middleware/__init__.py +59 -0
- sage/middleware/_version.py +6 -0
- sage/middleware/components/__init__.py +30 -0
- sage/middleware/components/extensions_compat.py +141 -0
- sage/middleware/components/sage_db/__init__.py +116 -0
- sage/middleware/components/sage_db/backend.py +136 -0
- sage/middleware/components/sage_db/service.py +15 -0
- sage/middleware/components/sage_flow/__init__.py +76 -0
- sage/middleware/components/sage_flow/python/__init__.py +14 -0
- sage/middleware/components/sage_flow/python/micro_service/__init__.py +4 -0
- sage/middleware/components/sage_flow/python/micro_service/sage_flow_service.py +88 -0
- sage/middleware/components/sage_flow/python/sage_flow.py +30 -0
- sage/middleware/components/sage_flow/service.py +14 -0
- sage/middleware/components/sage_mem/__init__.py +83 -0
- sage/middleware/components/sage_sias/__init__.py +59 -0
- sage/middleware/components/sage_sias/continual_learner.py +184 -0
- sage/middleware/components/sage_sias/coreset_selector.py +302 -0
- sage/middleware/components/sage_sias/types.py +94 -0
- sage/middleware/components/sage_tsdb/__init__.py +81 -0
- sage/middleware/components/sage_tsdb/python/__init__.py +21 -0
- sage/middleware/components/sage_tsdb/python/_sage_tsdb.pyi +17 -0
- sage/middleware/components/sage_tsdb/python/algorithms/__init__.py +17 -0
- sage/middleware/components/sage_tsdb/python/algorithms/base.py +51 -0
- sage/middleware/components/sage_tsdb/python/algorithms/out_of_order_join.py +248 -0
- sage/middleware/components/sage_tsdb/python/algorithms/window_aggregator.py +296 -0
- sage/middleware/components/sage_tsdb/python/micro_service/__init__.py +7 -0
- sage/middleware/components/sage_tsdb/python/micro_service/sage_tsdb_service.py +365 -0
- sage/middleware/components/sage_tsdb/python/sage_tsdb.py +523 -0
- sage/middleware/components/sage_tsdb/service.py +17 -0
- sage/middleware/components/vector_stores/__init__.py +25 -0
- sage/middleware/components/vector_stores/chroma.py +483 -0
- sage/middleware/components/vector_stores/chroma_adapter.py +185 -0
- sage/middleware/components/vector_stores/milvus.py +677 -0
- sage/middleware/operators/__init__.py +56 -0
- sage/middleware/operators/agent/__init__.py +24 -0
- sage/middleware/operators/agent/planning/__init__.py +5 -0
- sage/middleware/operators/agent/planning/llm_adapter.py +41 -0
- sage/middleware/operators/agent/planning/planner_adapter.py +98 -0
- sage/middleware/operators/agent/planning/router.py +107 -0
- sage/middleware/operators/agent/runtime.py +296 -0
- sage/middleware/operators/agentic/__init__.py +41 -0
- sage/middleware/operators/agentic/config.py +254 -0
- sage/middleware/operators/agentic/planning_operator.py +125 -0
- sage/middleware/operators/agentic/refined_searcher.py +132 -0
- sage/middleware/operators/agentic/runtime.py +241 -0
- sage/middleware/operators/agentic/timing_operator.py +125 -0
- sage/middleware/operators/agentic/tool_selection_operator.py +127 -0
- sage/middleware/operators/context/__init__.py +17 -0
- sage/middleware/operators/context/critic_evaluation.py +16 -0
- sage/middleware/operators/context/model_context.py +565 -0
- sage/middleware/operators/context/quality_label.py +12 -0
- sage/middleware/operators/context/search_query_results.py +61 -0
- sage/middleware/operators/context/search_result.py +42 -0
- sage/middleware/operators/context/search_session.py +79 -0
- sage/middleware/operators/filters/__init__.py +26 -0
- sage/middleware/operators/filters/context_sink.py +387 -0
- sage/middleware/operators/filters/context_source.py +376 -0
- sage/middleware/operators/filters/evaluate_filter.py +83 -0
- sage/middleware/operators/filters/tool_filter.py +74 -0
- sage/middleware/operators/llm/__init__.py +18 -0
- sage/middleware/operators/llm/sagellm_generator.py +432 -0
- sage/middleware/operators/rag/__init__.py +147 -0
- sage/middleware/operators/rag/arxiv.py +331 -0
- sage/middleware/operators/rag/chunk.py +13 -0
- sage/middleware/operators/rag/document_loaders.py +23 -0
- sage/middleware/operators/rag/evaluate.py +658 -0
- sage/middleware/operators/rag/generator.py +340 -0
- sage/middleware/operators/rag/index_builder/__init__.py +48 -0
- sage/middleware/operators/rag/index_builder/builder.py +363 -0
- sage/middleware/operators/rag/index_builder/manifest.py +101 -0
- sage/middleware/operators/rag/index_builder/storage.py +131 -0
- sage/middleware/operators/rag/pipeline.py +46 -0
- sage/middleware/operators/rag/profiler.py +59 -0
- sage/middleware/operators/rag/promptor.py +400 -0
- sage/middleware/operators/rag/refiner.py +231 -0
- sage/middleware/operators/rag/reranker.py +364 -0
- sage/middleware/operators/rag/retriever.py +1308 -0
- sage/middleware/operators/rag/searcher.py +37 -0
- sage/middleware/operators/rag/types.py +28 -0
- sage/middleware/operators/rag/writer.py +80 -0
- sage/middleware/operators/tools/__init__.py +71 -0
- sage/middleware/operators/tools/arxiv_paper_searcher.py +175 -0
- sage/middleware/operators/tools/arxiv_searcher.py +102 -0
- sage/middleware/operators/tools/duckduckgo_searcher.py +105 -0
- sage/middleware/operators/tools/image_captioner.py +104 -0
- sage/middleware/operators/tools/nature_news_fetcher.py +224 -0
- sage/middleware/operators/tools/searcher_tool.py +514 -0
- sage/middleware/operators/tools/text_detector.py +185 -0
- sage/middleware/operators/tools/url_text_extractor.py +104 -0
- sage/middleware/py.typed +2 -0
--- /dev/null
+++ b/sage/middleware/components/sage_tsdb/python/sage_tsdb.py
@@ -0,0 +1,523 @@
+"""
+SAGE TSDB - High-performance time series database for streaming data
+
+This module provides Python APIs for time series data storage, querying,
+and processing with support for out-of-order data and various algorithms.
+
+Uses C++ implementation for high performance when available, with pure Python fallback.
+"""
+
+from dataclasses import dataclass
+from datetime import datetime
+from enum import Enum
+from typing import Any
+
+import numpy as np
+
+# Try to import C++ bindings
+try:
+    from . import _sage_tsdb
+
+    HAS_CPP_BACKEND = True
+except ImportError:
+    _sage_tsdb = None
+    HAS_CPP_BACKEND = False
+
+
+class AggregationType(Enum):
+    """Time series aggregation types"""
+
+    SUM = "sum"
+    AVG = "avg"
+    MIN = "min"
+    MAX = "max"
+    COUNT = "count"
+    FIRST = "first"
+    LAST = "last"
+    STDDEV = "stddev"
+
+
+class InterpolationType(Enum):
+    """Interpolation methods for missing data"""
+
+    NONE = "none"
+    LINEAR = "linear"
+    FORWARD_FILL = "forward_fill"
+    BACKWARD_FILL = "backward_fill"
+    ZERO = "zero"
+
+
+@dataclass
+class TimeRange:
+    """Time range for queries"""
+
+    start_time: int | datetime
+    end_time: int | datetime
+
+    def __post_init__(self):
+        """Convert datetime to timestamp if necessary"""
+        if isinstance(self.start_time, datetime):
+            self.start_time = int(self.start_time.timestamp() * 1000)
+        if isinstance(self.end_time, datetime):
+            self.end_time = int(self.end_time.timestamp() * 1000)
+
+
+@dataclass
+class TimeSeriesData:
+    """Time series data point"""
+
+    timestamp: int  # milliseconds since epoch
+    value: float | np.ndarray
+    tags: dict[str, str] | None = None
+    fields: dict[str, Any] | None = None
+
+    def __post_init__(self):
+        """Initialize default values"""
+        if self.tags is None:
+            self.tags = {}
+        if self.fields is None:
+            self.fields = {}
+
+
+@dataclass
+class QueryConfig:
+    """Configuration for time series queries"""
+
+    time_range: TimeRange
+    tags: dict[str, str] | None = None
+    aggregation: AggregationType | None = None
+    window_size: int | None = None  # milliseconds
+    interpolation: InterpolationType = InterpolationType.NONE
+    limit: int | None = None
+    downsample_factor: int | None = None
+
+
+class TimeSeriesIndex:
+    """
+    Index structure for efficient time series queries.
+    Supports fast lookup by timestamp and tags.
+    """
+
+    def __init__(self):
+        self._data: list[TimeSeriesData] = []
+        self._tag_index: dict[str, dict[str, list[int]]] = {}
+        self._sorted = True
+
+    def add(self, data: TimeSeriesData) -> int:
+        """Add a time series data point"""
+        idx = len(self._data)
+        self._data.append(data)
+
+        # Update tag index
+        for key, value in data.tags.items():
+            if key not in self._tag_index:
+                self._tag_index[key] = {}
+            if value not in self._tag_index[key]:
+                self._tag_index[key][value] = []
+            self._tag_index[key][value].append(idx)
+
+        # Mark as unsorted if new data is out of order
+        if idx > 0 and data.timestamp < self._data[idx - 1].timestamp:
+            self._sorted = False
+
+        return idx
+
+    def add_batch(self, data_list: list[TimeSeriesData]) -> list[int]:
+        """Add multiple time series data points"""
+        return [self.add(data) for data in data_list]
+
+    def _ensure_sorted(self):
+        """Sort data by timestamp if needed"""
+        if not self._sorted:
+            # Sort data and rebuild tag index
+            sorted_data = sorted(self._data, key=lambda x: x.timestamp)
+            self._data = sorted_data
+            self._rebuild_tag_index()
+            self._sorted = True
+
+    def _rebuild_tag_index(self):
+        """Rebuild tag index after sorting"""
+        self._tag_index = {}
+        for idx, data in enumerate(self._data):
+            for key, value in data.tags.items():
+                if key not in self._tag_index:
+                    self._tag_index[key] = {}
+                if value not in self._tag_index[key]:
+                    self._tag_index[key][value] = []
+                self._tag_index[key][value].append(idx)
+
+    def query(self, config: QueryConfig) -> list[TimeSeriesData]:
+        """Query time series data"""
+        self._ensure_sorted()
+
+        # Binary search for time range
+        # Note: TimeRange.__post_init__ converts datetime to int
+        start_idx = self._binary_search(config.time_range.start_time)  # type: ignore[arg-type]
+        end_idx = self._binary_search(config.time_range.end_time, find_upper=True)  # type: ignore[arg-type]
+
+        # Filter by tags if specified
+        if config.tags:
+            matching_indices = self._filter_by_tags(config.tags)
+            # Intersect with time range
+            result_indices = [i for i in range(start_idx, end_idx + 1) if i in matching_indices]
+        else:
+            result_indices = list(range(start_idx, end_idx + 1))
+
+        # Get data points
+        results = [self._data[i] for i in result_indices]
+
+        # Apply limit if specified
+        if config.limit is not None:
+            results = results[: config.limit]
+
+        return results
+
+    def _binary_search(self, timestamp: int, find_upper: bool = False) -> int:
+        """
+        Binary search for timestamp.
+        If find_upper is False, returns the first index with timestamp >= target (lower bound).
+        If find_upper is True, returns the last index with timestamp <= target (upper bound).
+        """
+        low, high = 0, len(self._data) - 1
+        if not self._data:
+            return -1
+
+        if not find_upper:
+            # Lower bound: first index with timestamp >= target
+            while low <= high:
+                mid = (low + high) // 2
+                mid_time = self._data[mid].timestamp
+                if mid_time < timestamp:
+                    low = mid + 1
+                else:
+                    high = mid - 1
+            return low if low < len(self._data) else len(self._data) - 1
+        else:
+            # Upper bound: last index with timestamp <= target
+            while low <= high:
+                mid = (low + high) // 2
+                mid_time = self._data[mid].timestamp
+                if mid_time > timestamp:
+                    high = mid - 1
+                else:
+                    low = mid + 1
+            return high if high >= 0 else 0
+
+    def _filter_by_tags(self, tags: dict[str, str]) -> set:
+        """Filter indices by tags"""
+        matching_sets = []
+        for key, value in tags.items():
+            if key in self._tag_index and value in self._tag_index[key]:
+                matching_sets.append(set(self._tag_index[key][value]))
+            else:
+                return set()  # No match found
+
+        # Intersect all matching sets
+        if matching_sets:
+            return set.intersection(*matching_sets)
+        return set()
+
+    def size(self) -> int:
+        """Get number of data points"""
+        return len(self._data)
+
+
+class SageTSDB:
+    """
+    High-performance time series database for streaming data.
+
+    Features:
+    - Efficient storage and indexing of time series data
+    - Support for out-of-order data ingestion
+    - Fast queries with time range and tag filtering
+    - Pluggable algorithms for stream processing
+    - Window-based aggregations
+
+    Uses C++ backend when available for optimal performance.
+    """
+
+    def __init__(self, config: dict[str, Any] | None = None):
+        """
+        Initialize time series database.
+
+        Args:
+            config: Optional configuration dictionary
+        """
+        self._config = config or {}
+
+        # Use C++ backend if available
+        if HAS_CPP_BACKEND:
+            self._db = _sage_tsdb.TimeSeriesDB()  # type: ignore[attr-defined]
+            self._backend = "cpp"
+        else:
+            # Fallback to pure Python implementation
+            self._index = TimeSeriesIndex()
+            self._backend = "python"
+
+        self._algorithms: dict[str, Any] = {}
+
+    def add(
+        self,
+        timestamp: int | datetime,
+        value: float | np.ndarray,
+        tags: dict[str, str] | None = None,
+        fields: dict[str, Any] | None = None,
+    ) -> int:
+        """
+        Add a single time series data point.
+
+        Args:
+            timestamp: Unix timestamp in milliseconds or datetime
+            value: Numeric value or array
+            tags: Optional tags for indexing
+            fields: Optional additional fields
+
+        Returns:
+            Index of the added data point
+        """
+        if isinstance(timestamp, datetime):
+            timestamp = int(timestamp.timestamp() * 1000)
+
+        if self._backend == "cpp":
+            # Use C++ backend
+            if isinstance(value, np.ndarray):
+                value_list = value.tolist()
+            elif isinstance(value, (list, tuple)):
+                value_list = list(value)
+            else:
+                value_list = value
+
+            # C++ backend handles tags/fields differently
+            return self._db.add(
+                timestamp,
+                value_list,
+                tags or {},
+                fields or {},
+            )
+        else:
+            # Pure Python implementation
+            data = TimeSeriesData(timestamp=timestamp, value=value, tags=tags, fields=fields)
+            return self._index.add(data)
+
+    def add_batch(
+        self,
+        timestamps: list[int] | list[datetime] | np.ndarray,
+        values: list[float] | np.ndarray,
+        tags_list: list[dict[str, str]] | None = None,
+        fields_list: list[dict[str, Any]] | None = None,
+    ) -> list[int]:
+        """
+        Add multiple time series data points.
+
+        Args:
+            timestamps: List of timestamps
+            values: List of values
+            tags_list: Optional list of tags
+            fields_list: Optional list of fields
+
+        Returns:
+            List of indices for added data points
+        """
+        # Convert to consistent format
+        if isinstance(timestamps, np.ndarray):
+            timestamps = timestamps.tolist()
+        if isinstance(values, np.ndarray):
+            values = values.tolist()
+
+        # Convert datetime to timestamps
+        ts_list = []
+        for ts in timestamps:
+            if isinstance(ts, datetime):
+                ts_list.append(int(ts.timestamp() * 1000))
+            else:
+                ts_list.append(ts)
+
+        # Create data points
+        n = len(ts_list)
+        tags_list = tags_list or [None] * n  # type: ignore[list-item]
+        fields_list = fields_list or [None] * n  # type: ignore[list-item]
+
+        data_list = [
+            TimeSeriesData(
+                timestamp=ts_list[i],
+                value=values[i],
+                tags=tags_list[i],
+                fields=fields_list[i],
+            )
+            for i in range(n)
+        ]
+
+        return self._index.add_batch(data_list)
+
+    def query(
+        self,
+        time_range: TimeRange,
+        tags: dict[str, str] | None = None,
+        aggregation: AggregationType | None = None,
+        window_size: int | None = None,
+        limit: int | None = None,
+    ) -> list[TimeSeriesData]:
+        """
+        Query time series data.
+
+        Args:
+            time_range: Time range for query
+            tags: Optional tags to filter by
+            aggregation: Optional aggregation type
+            window_size: Optional window size for aggregation (ms)
+            limit: Optional limit on number of results
+
+        Returns:
+            List of matching time series data points
+        """
+        config = QueryConfig(
+            time_range=time_range,
+            tags=tags,
+            aggregation=aggregation,
+            window_size=window_size,
+            limit=limit,
+        )
+
+        results = self._index.query(config)
+
+        # Apply aggregation if specified
+        if aggregation and window_size:
+            results = self._apply_aggregation(results, aggregation, window_size)
+
+        return results
+
+    def _apply_aggregation(
+        self,
+        data: list[TimeSeriesData],
+        aggregation: AggregationType,
+        window_size: int,
+    ) -> list[TimeSeriesData]:
+        """Apply window-based aggregation"""
+        if not data:
+            return []
+
+        aggregated = []
+        window_start = data[0].timestamp
+        window_data = []
+
+        for point in data:
+            # Check if still in current window
+            if point.timestamp < window_start + window_size:
+                window_data.append(point)
+            else:
+                # Aggregate current window
+                if window_data:
+                    agg_point = self._aggregate_window(window_data, aggregation, window_start)
+                    aggregated.append(agg_point)
+
+                # Start new window
+                window_start = point.timestamp
+                window_data = [point]
+
+        # Aggregate last window
+        if window_data:
+            agg_point = self._aggregate_window(window_data, aggregation, window_start)
+            aggregated.append(agg_point)
+
+        return aggregated
+
+    def _aggregate_window(
+        self,
+        data: list[TimeSeriesData],
+        aggregation: AggregationType,
+        window_timestamp: int,
+    ) -> TimeSeriesData:
+        """Aggregate a window of data"""
+        values = [point.value for point in data]
+
+        if aggregation == AggregationType.SUM:
+            agg_value = sum(values)
+        elif aggregation == AggregationType.AVG:
+            agg_value = sum(values) / len(values)
+        elif aggregation == AggregationType.MIN:
+            agg_value = min(values)
+        elif aggregation == AggregationType.MAX:
+            agg_value = max(values)
+        elif aggregation == AggregationType.COUNT:
+            agg_value = len(values)
+        elif aggregation == AggregationType.FIRST:
+            agg_value = values[0]
+        elif aggregation == AggregationType.LAST:
+            agg_value = values[-1]
+        elif aggregation == AggregationType.STDDEV:
+            agg_value = float(np.std(values))  # type: ignore[arg-type]
+        else:
+            agg_value = sum(values) / len(values)
+
+        # Merge tags from all data points
+        merged_tags = {}
+        for point in data:
+            if point.tags:
+                merged_tags.update(point.tags)
+
+        return TimeSeriesData(
+            timestamp=window_timestamp,
+            value=agg_value,
+            tags=merged_tags,
+            fields={"window_size": len(data)},
+        )
+
+    def register_algorithm(self, name: str, algorithm: Any):
+        """
+        Register a custom algorithm.
+
+        Args:
+            name: Algorithm name
+            algorithm: Algorithm instance
+        """
+        self._algorithms[name] = algorithm
+
+    def apply_algorithm(self, name: str, data: list[TimeSeriesData], **kwargs) -> Any:
+        """
+        Apply a registered algorithm.
+
+        Args:
+            name: Algorithm name
+            data: Input data
+            **kwargs: Algorithm-specific parameters
+
+        Returns:
+            Algorithm output
+        """
+        if name not in self._algorithms:
+            raise ValueError(f"Algorithm '{name}' not registered")
+
+        return self._algorithms[name].process(data, **kwargs)
+
+    @property
+    def size(self) -> int:
+        """Get number of data points"""
+        if self._backend == "cpp":
+            return self._db.size()
+        else:
+            return self._index.size()
+
+    def get_stats(self) -> dict[str, Any]:
+        """Get database statistics"""
+        stats = {
+            "size": self.size,
+            "backend": self._backend,
+            "algorithms": list(self._algorithms.keys()),
+        }
+
+        if self._backend == "cpp":
+            # Get C++ specific stats
+            cpp_stats = self._db.get_stats()
+            stats.update(cpp_stats)
+
+        return stats
+
+
+__all__ = [
+    "SageTSDB",
+    "TimeSeriesData",
+    "TimeRange",
+    "QueryConfig",
+    "AggregationType",
+    "InterpolationType",
+]
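For orientation, a minimal usage sketch of the API added above. All names and signatures (SageTSDB.add, SageTSDB.query, TimeRange, AggregationType, the size property) come straight from the file; the sensor series itself is illustrative. Note that, as published, SageTSDB.add_batch and SageTSDB.query dispatch to the pure-Python self._index, which is only created when the C++ extension is absent, so the sketch assumes HAS_CPP_BACKEND is False.

    from datetime import datetime, timedelta

    from sage.middleware.components.sage_tsdb.python.sage_tsdb import (
        AggregationType,
        SageTSDB,
        TimeRange,
    )

    db = SageTSDB()
    base = datetime(2025, 1, 1)

    # Ingest four points, one out of order; the index re-sorts lazily on query.
    for offset in (0, 2, 1, 3):
        db.add(base + timedelta(seconds=offset), float(offset), tags={"sensor": "s1"})

    # Query the range with 2-second (2000 ms) AVG windows and a tag filter.
    points = db.query(
        TimeRange(base, base + timedelta(seconds=10)),
        tags={"sensor": "s1"},
        aggregation=AggregationType.AVG,
        window_size=2000,
    )
    print(db.size, [(p.timestamp, p.value) for p in points])

With this input the query yields two aggregated points (averages 0.5 and 2.5), since the lazily re-sorted series falls into two 2-second windows.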
--- /dev/null
+++ b/sage/middleware/components/sage_tsdb/service.py
@@ -0,0 +1,17 @@
+"""
+SageTSDB Middleware Service
+
+This module provides the middleware service interface for SageTSDB,
+wrapping the Python implementation for time series data processing.
+"""
+
+# Micro-service wrapper
+from .python.micro_service.sage_tsdb_service import (
+    SageTSDBService,
+    SageTSDBServiceConfig,
+)
+
+# Core Python API
+from .python.sage_tsdb import SageTSDB
+
+__all__ = ["SageTSDB", "SageTSDBService", "SageTSDBServiceConfig"]
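This service module is a thin re-export layer: the SageTSDB it exposes is the same class as the core API, and the micro-service wrapper lives in python/micro_service/sage_tsdb_service.py. A quick sanity check using only the names shown in the hunk (the constructor arguments of SageTSDBService and SageTSDBServiceConfig are defined in that wrapper file, which is not part of this hunk, so none are assumed here):

    from sage.middleware.components.sage_tsdb import service
    from sage.middleware.components.sage_tsdb.python.sage_tsdb import SageTSDB

    # The re-export and the core class are the same object.
    assert service.SageTSDB is SageTSDB
    print(service.__all__)  # ['SageTSDB', 'SageTSDBService', 'SageTSDBServiceConfig']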
--- /dev/null
+++ b/sage/middleware/components/vector_stores/__init__.py
@@ -0,0 +1,25 @@
+"""Vector store backends for SAGE middleware.
+
+This module provides adapters for various vector databases:
+- Milvus / Milvus Lite
+- ChromaDB
+- (SageVDB is in separate sage_db component)
+
+These were promoted from sage-libs/integrations because they depend on
+external database services (violates L3 → L4 layering).
+
+Usage:
+    from sage.middleware.components.vector_stores import MilvusBackend, ChromaBackend
+"""
+
+from sage.middleware.components.vector_stores.chroma import ChromaBackend, ChromaUtils
+from sage.middleware.components.vector_stores.chroma_adapter import ChromaVectorStoreAdapter
+from sage.middleware.components.vector_stores.milvus import MilvusBackend, MilvusUtils
+
+__all__ = [
+    "MilvusBackend",
+    "MilvusUtils",
+    "ChromaBackend",
+    "ChromaUtils",
+    "ChromaVectorStoreAdapter",
+]
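As a closing illustration, the exports above can be smoke-tested without a running Milvus or Chroma instance; constructor parameters for the backends live in chroma.py and milvus.py (not shown in this hunk), so none are assumed:

    from sage.middleware.components.vector_stores import (
        ChromaBackend,
        ChromaUtils,
        ChromaVectorStoreAdapter,
        MilvusBackend,
        MilvusUtils,
    )

    # Per the module docstring, these classes adapt external vector databases,
    # which is why they sit in the middleware layer rather than in sage-libs.
    for cls in (MilvusBackend, ChromaBackend, ChromaVectorStoreAdapter):
        print(f"{cls.__module__}.{cls.__name__}")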