detectkit 0.2.4__tar.gz → 0.2.6__tar.gz

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (59)
  1. {detectkit-0.2.4/detectkit.egg-info → detectkit-0.2.6}/PKG-INFO +1 -1
  2. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/cli/commands/run.py +15 -1
  3. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/config/project_config.py +3 -0
  4. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/database/clickhouse_manager.py +50 -1
  5. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/database/internal_tables.py +103 -1
  6. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/database/manager.py +48 -0
  7. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/database/tables.py +61 -0
  8. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/orchestration/task_manager.py +18 -1
  9. {detectkit-0.2.4 → detectkit-0.2.6/detectkit.egg-info}/PKG-INFO +1 -1
  10. {detectkit-0.2.4 → detectkit-0.2.6}/pyproject.toml +1 -1
  11. {detectkit-0.2.4 → detectkit-0.2.6}/LICENSE +0 -0
  12. {detectkit-0.2.4 → detectkit-0.2.6}/MANIFEST.in +0 -0
  13. {detectkit-0.2.4 → detectkit-0.2.6}/README.md +0 -0
  14. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/__init__.py +0 -0
  15. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/alerting/__init__.py +0 -0
  16. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/alerting/channels/__init__.py +0 -0
  17. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/alerting/channels/base.py +0 -0
  18. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/alerting/channels/email.py +0 -0
  19. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/alerting/channels/factory.py +0 -0
  20. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/alerting/channels/mattermost.py +0 -0
  21. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/alerting/channels/slack.py +0 -0
  22. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/alerting/channels/telegram.py +0 -0
  23. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/alerting/channels/webhook.py +0 -0
  24. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/alerting/orchestrator.py +0 -0
  25. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/cli/__init__.py +0 -0
  26. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/cli/commands/__init__.py +0 -0
  27. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/cli/commands/init.py +0 -0
  28. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/cli/commands/test_alert.py +0 -0
  29. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/cli/main.py +0 -0
  30. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/config/__init__.py +0 -0
  31. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/config/metric_config.py +0 -0
  32. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/config/profile.py +0 -0
  33. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/config/validator.py +0 -0
  34. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/core/__init__.py +0 -0
  35. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/core/interval.py +0 -0
  36. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/core/models.py +0 -0
  37. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/database/__init__.py +0 -0
  38. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/detectors/__init__.py +0 -0
  39. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/detectors/base.py +0 -0
  40. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/detectors/factory.py +0 -0
  41. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/detectors/statistical/__init__.py +0 -0
  42. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/detectors/statistical/iqr.py +0 -0
  43. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/detectors/statistical/mad.py +0 -0
  44. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/detectors/statistical/manual_bounds.py +0 -0
  45. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/detectors/statistical/zscore.py +0 -0
  46. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/loaders/__init__.py +0 -0
  47. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/loaders/metric_loader.py +0 -0
  48. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/loaders/query_template.py +0 -0
  49. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/orchestration/__init__.py +0 -0
  50. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/utils/__init__.py +0 -0
  51. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit/utils/stats.py +0 -0
  52. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit.egg-info/SOURCES.txt +0 -0
  53. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit.egg-info/dependency_links.txt +0 -0
  54. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit.egg-info/entry_points.txt +0 -0
  55. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit.egg-info/requires.txt +0 -0
  56. {detectkit-0.2.4 → detectkit-0.2.6}/detectkit.egg-info/top_level.txt +0 -0
  57. {detectkit-0.2.4 → detectkit-0.2.6}/requirements.txt +0 -0
  58. {detectkit-0.2.4 → detectkit-0.2.6}/setup.cfg +0 -0
  59. {detectkit-0.2.4 → detectkit-0.2.6}/setup.py +0 -0
{detectkit-0.2.4/detectkit.egg-info → detectkit-0.2.6}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: detectkit
-Version: 0.2.4
+Version: 0.2.6
 Summary: Metric monitoring with automatic anomaly detection
 Author: detectkit team
 License: MIT
{detectkit-0.2.4 → detectkit-0.2.6}/detectkit/cli/commands/run.py
@@ -12,6 +12,7 @@ import click

 from detectkit.config.metric_config import MetricConfig
 from detectkit.config.profile import ProfilesConfig
+from detectkit.config.project_config import ProjectConfig
 from detectkit.config.validator import validate_metric_uniqueness
 from detectkit.database.internal_tables import InternalTablesManager
 from detectkit.orchestration.task_manager import PipelineStep, TaskManager
@@ -63,7 +64,18 @@ def run_command(
     click.echo(f"Project root: {project_root}")

     # Load project config
-    # project_config = load_project_config(project_root)
+    project_config_path = project_root / "detectkit_project.yml"
+    try:
+        project_config = ProjectConfig.from_yaml_file(project_config_path)
+    except Exception as e:
+        click.echo(
+            click.style(
+                f"Error loading detectkit_project.yml: {e}",
+                fg="red",
+                bold=True,
+            )
+        )
+        return

     # Select metrics based on selector
     # Returns list of (path, config) tuples with uniqueness validation
@@ -169,6 +181,7 @@ def run_command(
         internal_manager=internal_manager,
         db_manager=db_manager,
         profiles_config=profiles_config,
+        project_config=project_config,
     )

     # Process each metric
@@ -457,6 +470,7 @@ def process_metric(
         to_date=to_date,
         full_refresh=full_refresh,
         force=force,
+        metric_file_path=str(metric_path),
     )

     # Display results - task_manager already printed details
{detectkit-0.2.4 → detectkit-0.2.6}/detectkit/config/project_config.py
@@ -33,6 +33,7 @@ class ProjectTablesConfig(BaseModel):
         datapoints: Default datapoints table name
         detections: Default detections table name
         tasks: Default tasks table name
+        metrics: Default metrics configuration table name
     """

     datapoints: str = Field(
@@ -42,6 +43,7 @@ class ProjectTablesConfig(BaseModel):
         default="_dtk_detections", description="Default detections table"
     )
     tasks: str = Field(default="_dtk_tasks", description="Default tasks table")
+    metrics: str = Field(default="_dtk_metrics", description="Default metrics config table")


 class ProjectTimeoutsConfig(BaseModel):
@@ -95,6 +97,7 @@ class ProjectConfig(BaseModel):
         datapoints: "_dtk_datapoints"
         detections: "_dtk_detections"
         tasks: "_dtk_tasks"
+        metrics: "_dtk_metrics"

     timeouts:
         load: 3600
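Note: the three project_config.py hunks above only add a metrics entry alongside the existing table-name settings. A minimal sketch of how the new default could be read back, assuming ProjectTablesConfig is an ordinary pydantic model whose defaults match the Field(...) declarations shown in this diff (the no-argument construction is an assumption):

    from detectkit.config.project_config import ProjectTablesConfig

    # Sketch only: relies on the defaults declared in the diff above.
    tables = ProjectTablesConfig()
    print(tables.tasks)     # "_dtk_tasks"
    print(tables.metrics)   # "_dtk_metrics"  (new in 0.2.6)

    # A project-level override in detectkit_project.yml would replace the default, e.g.
    #   tables:
    #     metrics: "my_metrics_meta"   # hypothetical override value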
{detectkit-0.2.4 → detectkit-0.2.6}/detectkit/database/clickhouse_manager.py
@@ -300,7 +300,20 @@ class ClickHouseDatabaseManager(BaseDatabaseManager):
         result = self.execute_query(query, {"metric_name": metric_name})

         if result and result[0]["last_ts"]:
-            return result[0]["last_ts"]
+            last_ts = result[0]["last_ts"]
+
+            # ClickHouse returns epoch (1970-01-01 00:00:00) for NULL datetime
+            # Detect this and treat as None to avoid loading from 1970
+            epoch = datetime(1970, 1, 1, 0, 0, 0)
+
+            # Handle both timezone-aware and naive datetimes
+            if last_ts.tzinfo is not None:
+                epoch = epoch.replace(tzinfo=last_ts.tzinfo)
+
+            if last_ts == epoch:
+                return None
+
+            return last_ts

         return None
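Note: the epoch check above compensates for ClickHouse materializing NULL or absent DateTime values as 1970-01-01. Pulled out of the manager, the same guard looks roughly like this (the helper name is hypothetical):

    from datetime import datetime, timezone
    from typing import Optional

    def _normalize_last_ts(last_ts: Optional[datetime]) -> Optional[datetime]:
        """Hypothetical standalone version of the epoch-sentinel guard above."""
        if last_ts is None:
            return None
        epoch = datetime(1970, 1, 1)
        if last_ts.tzinfo is not None:
            # Compare against an epoch carrying the same tzinfo so == is well-defined
            epoch = epoch.replace(tzinfo=last_ts.tzinfo)
        return None if last_ts == epoch else last_ts

    # A tz-aware epoch read back from a DateTime64(3, 'UTC') column maps to None:
    assert _normalize_last_ts(datetime(1970, 1, 1, tzinfo=timezone.utc)) is None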
@@ -377,6 +390,42 @@ class ClickHouseDatabaseManager(BaseDatabaseManager):
             conflict_strategy="ignore"
         )

+    def upsert_record(
+        self,
+        table_name: str,
+        key_columns: Dict[str, Any],
+        data: Dict[str, np.ndarray]
+    ) -> int:
+        """
+        Upsert record in ClickHouse using DELETE + INSERT pattern.
+
+        ClickHouse doesn't have native UPSERT, so we explicitly delete
+        the old record (if exists) and then insert the new one.
+
+        Args:
+            table_name: Fully qualified table name
+            key_columns: Dict of column names to values for WHERE clause
+            data: Dict of column names to numpy arrays for INSERT
+
+        Returns:
+            Number of rows inserted (typically 1)
+        """
+        # Step 1: DELETE existing record (if any)
+        where_parts = [f"{col} = %({col})s" for col in key_columns.keys()]
+        delete_query = f"""
+            ALTER TABLE {table_name}
+            DELETE WHERE {' AND '.join(where_parts)}
+        """
+
+        self._client.execute(delete_query, key_columns)
+
+        # Step 2: INSERT new record
+        return self.insert_batch(
+            table_name,
+            data,
+            conflict_strategy="ignore"
+        )
+
     @property
     def internal_location(self) -> str:
         """Get internal database name."""
{detectkit-0.2.4 → detectkit-0.2.6}/detectkit/database/internal_tables.py
@@ -2,12 +2,13 @@
 Internal tables manager for detectk.

 High-level wrapper over BaseDatabaseManager for working with internal tables
-(_dtk_datapoints, _dtk_detections, _dtk_tasks).
+(_dtk_datapoints, _dtk_detections, _dtk_tasks, _dtk_metrics).

 This class provides convenient methods that use the UNIVERSAL BaseDatabaseManager
 methods underneath. It does NOT duplicate logic - just provides semantic wrappers.
 """

+import json
 from datetime import datetime, timezone
 from typing import Dict, List, Optional

@@ -18,6 +19,7 @@ from detectkit.database.tables import (
     INTERNAL_TABLES,
     TABLE_DATAPOINTS,
     TABLE_DETECTIONS,
+    TABLE_METRICS,
     TABLE_TASKS,
 )

@@ -722,3 +724,103 @@ class InternalTablesManager:
             status="running",
             last_processed_timestamp=last_processed_timestamp,
         )
+
+    def upsert_metric_config(
+        self,
+        metric_config,  # MetricConfig type (avoiding circular import)
+        file_path: str,
+        table_name_override: Optional[str] = None
+    ) -> int:
+        """
+        Save or update metric configuration metadata to _dtk_metrics table.
+
+        This table is INFORMATIONAL ONLY - used by analysts for dashboards.
+        It does NOT affect library logic.
+
+        Updated on every dtk run via DELETE + INSERT pattern for guaranteed uniqueness.
+
+        Args:
+            metric_config: MetricConfig instance
+            file_path: Path to .yml config file
+            table_name_override: Optional override for table name (from ProjectConfig)
+
+        Returns:
+            Number of rows inserted (typically 1)
+
+        Example:
+            >>> internal.upsert_metric_config(
+            ...     metric_config=config,
+            ...     file_path="metrics/cpu_usage.yml",
+            ...     table_name_override="_dtk_metrics"
+            ... )
+        """
+        # Get table name (use override if provided, else default)
+        table_name = table_name_override or TABLE_METRICS
+        full_table_name = self._manager.get_full_table_name(
+            table_name, use_internal=True
+        )
+
+        # Get current UTC time (naive for numpy compatibility)
+        now = datetime.now(timezone.utc).replace(tzinfo=None)
+
+        # Parse loading_start_time if provided
+        loading_start_time_dt = None
+        if metric_config.loading_start_time:
+            try:
+                from datetime import datetime as dt
+                loading_start_time_dt = dt.strptime(
+                    metric_config.loading_start_time,
+                    "%Y-%m-%d %H:%M:%S"
+                ).replace(tzinfo=None)
+            except (ValueError, AttributeError):
+                # If parsing fails, leave as None
+                pass
+
+        # Extract alert configuration
+        is_alert_enabled = 0
+        timezone_str = None
+        direction = None
+        consecutive_anomalies = 3
+        no_data_alert = 0
+        min_detectors = 1
+
+        if metric_config.alerting:
+            is_alert_enabled = 1 if metric_config.alerting.enabled else 0
+            timezone_str = metric_config.alerting.timezone
+            direction = metric_config.alerting.direction
+            consecutive_anomalies = metric_config.alerting.consecutive_anomalies
+            no_data_alert = 1 if metric_config.alerting.no_data_alert else 0
+            min_detectors = metric_config.alerting.min_detectors
+
+        # Prepare data for INSERT
+        data = {
+            "metric_name": np.array([metric_config.name]),
+            "description": np.array([getattr(metric_config, 'description', None)]),
+            "path": np.array([file_path]),
+            "interval": np.array([str(metric_config.interval)]),
+            "loading_start_time": np.array(
+                [loading_start_time_dt], dtype="datetime64[ms]"
+            ) if loading_start_time_dt else np.array([None]),
+            "loading_batch_size": np.array(
+                [metric_config.loading_batch_size], dtype=np.uint32
+            ),
+            "is_alert_enabled": np.array([is_alert_enabled], dtype=np.uint8),
+            "timezone": np.array([timezone_str]),
+            "direction": np.array([direction]),
+            "consecutive_anomalies": np.array(
+                [consecutive_anomalies], dtype=np.uint32
+            ),
+            "no_data_alert": np.array([no_data_alert], dtype=np.uint8),
+            "min_detectors": np.array([min_detectors], dtype=np.uint32),
+            "tags": np.array([json.dumps(metric_config.tags or [])]),
+            "enabled": np.array([1 if metric_config.enabled else 0], dtype=np.uint8),
+            "created_at": np.array([now], dtype="datetime64[ms]"),
+            "updated_at": np.array([now], dtype="datetime64[ms]"),
+        }
+
+        # Use upsert_record for DELETE + INSERT pattern
+        return self._manager.upsert_record(
+            table_name=full_table_name,
+            key_columns={"metric_name": metric_config.name},
+            data=data
+        )
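Note: loading_start_time is stored only if it matches the exact "%Y-%m-%d %H:%M:%S" layout; anything else falls back to NULL. A standalone sketch of that behavior (the helper name is hypothetical):

    from datetime import datetime

    def parse_loading_start_time(value: str | None):
        """Hypothetical standalone version of the parsing above."""
        if not value:
            return None
        try:
            # Only the exact "YYYY-MM-DD HH:MM:SS" layout is accepted
            return datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
        except (ValueError, AttributeError):
            return None  # stored as NULL in _dtk_metrics

    print(parse_loading_start_time("2024-01-01 00:00:00"))  # 2024-01-01 00:00:00
    print(parse_loading_start_time("2024-01-01"))           # None (format mismatch)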
{detectkit-0.2.4 → detectkit-0.2.6}/detectkit/database/manager.py
@@ -245,6 +245,54 @@ class BaseDatabaseManager(ABC):
         """
         pass

+    @abstractmethod
+    def upsert_record(
+        self,
+        table_name: str,
+        key_columns: Dict[str, Any],
+        data: Dict[str, np.ndarray]
+    ) -> int:
+        """
+        Delete record by key columns, then insert new record.
+
+        This is a universal database-agnostic upsert pattern that guarantees
+        uniqueness by explicitly deleting old record before inserting new one.
+
+        Use this when ReplacingMergeTree or native UPSERT is not suitable
+        (e.g., for informational tables where guaranteed uniqueness is required).
+
+        Implementation varies by database:
+        - ClickHouse: ALTER TABLE ... DELETE + INSERT
+        - PostgreSQL: DELETE + INSERT (in transaction)
+        - MySQL: DELETE + INSERT (in transaction)
+
+        Args:
+            table_name: Fully qualified table name
+            key_columns: Dict of column names to values for WHERE clause
+                (e.g., {"metric_name": "cpu_usage"})
+            data: Dict of column names to numpy arrays for INSERT
+                (must include all key columns)
+
+        Returns:
+            Number of rows inserted (typically 1)
+
+        Raises:
+            DatabaseError: If operation fails
+
+        Example:
+            >>> manager.upsert_record(
+            ...     table_name="detectk_internal._dtk_metrics",
+            ...     key_columns={"metric_name": "cpu_usage"},
+            ...     data={
+            ...         "metric_name": np.array(["cpu_usage"]),
+            ...         "interval": np.array(["10min"]),
+            ...         "enabled": np.array([1]),
+            ...         ...
+            ...     }
+            ... )
+        """
+        pass
+
     @property

     def internal_location(self) -> str:
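Note: a hedged usage sketch of the upsert_record contract declared above (the ClickHouse implementation appears earlier in this diff). The table and values follow the docstring example; the data dict is truncated for brevity, a real call must supply every column of the target table, and manager is assumed to be an already-connected database manager:

    import numpy as np

    # DELETE ... WHERE metric_name = 'cpu_usage' runs first, then a fresh INSERT.
    rows = manager.upsert_record(
        table_name="detectk_internal._dtk_metrics",
        key_columns={"metric_name": "cpu_usage"},
        data={
            "metric_name": np.array(["cpu_usage"]),
            "interval": np.array(["10min"]),
            "enabled": np.array([1], dtype=np.uint8),
            # ... remaining _dtk_metrics columns omitted in this sketch
        },
    )
    assert rows == 1  # typically one row per upsert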
{detectkit-0.2.4 → detectkit-0.2.6}/detectkit/database/tables.py
@@ -5,6 +5,7 @@ Defines schemas for internal tables:
 - _dtk_datapoints: Metric data points
 - _dtk_detections: Anomaly detections
 - _dtk_tasks: Task status and locking
+- _dtk_metrics: Metric configuration metadata (informational)
 """

 from detectkit.core.models import ColumnDefinition, TableModel
@@ -125,14 +126,74 @@ def get_tasks_table_model() -> TableModel:
     )


+def get_metrics_table_model() -> TableModel:
+    """
+    Get TableModel for _dtk_metrics table.
+
+    This table stores metric configuration metadata for analytics dashboards.
+    It is INFORMATIONAL ONLY - does not affect library logic.
+    Updated on every dtk run via DELETE + INSERT pattern.
+
+    Schema:
+    - metric_name: Metric identifier (PRIMARY KEY)
+    - description: Optional metric description
+    - path: Path to .yml config file
+    - interval: Interval as string ("10min", "1h", etc.)
+    - loading_start_time: Start time for initial data loading
+    - loading_batch_size: Batch size for loading operations
+    - is_alert_enabled: Whether alerting is enabled (0/1)
+    - timezone: Timezone for alerts (e.g., "Europe/Moscow")
+    - direction: Required anomaly direction ("same", "any", "up", "down")
+    - consecutive_anomalies: Consecutive anomalies to trigger alert
+    - no_data_alert: Whether to alert on missing data (0/1)
+    - min_detectors: Minimum detectors that must agree
+    - tags: JSON array of tags
+    - enabled: Whether metric is enabled for processing (0/1)
+    - created_at: First time config was saved (UTC, millisecond precision)
+    - updated_at: Last config update (UTC, millisecond precision)
+
+    Primary Key: (metric_name)
+    Engine: MergeTree (uses DELETE + INSERT for guaranteed uniqueness)
+    """
+    return TableModel(
+        columns=[
+            ColumnDefinition("metric_name", "String"),
+            ColumnDefinition("description", "Nullable(String)", nullable=True),
+            ColumnDefinition("path", "String"),
+            ColumnDefinition("interval", "String"),
+            ColumnDefinition(
+                "loading_start_time",
+                "Nullable(DateTime64(3, 'UTC'))",
+                nullable=True
+            ),
+            ColumnDefinition("loading_batch_size", "UInt32"),
+            ColumnDefinition("is_alert_enabled", "UInt8"),
+            ColumnDefinition("timezone", "Nullable(String)", nullable=True),
+            ColumnDefinition("direction", "Nullable(String)", nullable=True),
+            ColumnDefinition("consecutive_anomalies", "UInt32"),
+            ColumnDefinition("no_data_alert", "UInt8"),
+            ColumnDefinition("min_detectors", "UInt32"),
+            ColumnDefinition("tags", "String"),
+            ColumnDefinition("enabled", "UInt8"),
+            ColumnDefinition("created_at", "DateTime64(3, 'UTC')"),
+            ColumnDefinition("updated_at", "DateTime64(3, 'UTC')"),
+        ],
+        primary_key=["metric_name"],
+        engine="MergeTree",
+        order_by=["metric_name"],
+    )
+
+
 # Table names as constants
 TABLE_DATAPOINTS = "_dtk_datapoints"
 TABLE_DETECTIONS = "_dtk_detections"
 TABLE_TASKS = "_dtk_tasks"
+TABLE_METRICS = "_dtk_metrics"

 # Map of table names to model factories
 INTERNAL_TABLES = {
     TABLE_DATAPOINTS: get_datapoints_table_model,
     TABLE_DETECTIONS: get_detections_table_model,
     TABLE_TASKS: get_tasks_table_model,
+    TABLE_METRICS: get_metrics_table_model,
 }
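Note: with TABLE_METRICS registered, the INTERNAL_TABLES mapping now covers all four internal tables. A brief sketch of how the registry can be consumed; only the constants, factory functions, and dict shape come from the diff, and the create-table call is illustrative rather than an API shown here:

    from detectkit.database.tables import INTERNAL_TABLES, TABLE_METRICS

    # Each internal table name maps to a zero-argument factory returning its TableModel.
    metrics_model = INTERNAL_TABLES[TABLE_METRICS]()   # TableModel for _dtk_metrics

    for name, factory in INTERNAL_TABLES.items():
        model = factory()
        # Illustrative only: a manager could materialize the table from `model`, e.g.
        # db_manager.create_table(name, model)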
{detectkit-0.2.4 → detectkit-0.2.6}/detectkit/orchestration/task_manager.py
@@ -73,6 +73,7 @@ class TaskManager:
         internal_manager: InternalTablesManager,
         db_manager,  # BaseDatabaseManager
         profiles_config=None,  # ProfilesConfig (optional for backward compatibility)
+        project_config=None,  # ProjectConfig (for table name overrides)
     ):
         """
         Initialize task manager.
@@ -81,10 +82,12 @@ class TaskManager:
             internal_manager: Manager for internal detectk tables
             db_manager: Database manager for metric data
             profiles_config: Profiles configuration (for alert channels)
+            project_config: Project configuration (for table name overrides)
         """
         self.internal = internal_manager
         self.db_manager = db_manager
         self.profiles_config = profiles_config
+        self.project_config = project_config

     def run_metric(
         self,
@@ -94,6 +97,7 @@
         to_date: Optional[datetime] = None,
         full_refresh: bool = False,
         force: bool = False,
+        metric_file_path: Optional[str] = None,
     ) -> Dict[str, any]:
         """
         Run metric processing pipeline.
@@ -105,6 +109,7 @@
             to_date: End date for data loading (optional)
             full_refresh: Delete all existing data and reload from scratch
             force: Ignore task locks
+            metric_file_path: Path to metric .yml file (for _dtk_metrics table)

         Returns:
             Dict with execution results:
@@ -139,7 +144,19 @@
         }

         try:
-            # Step 0: Acquire lock
+            # Step 0a: Save metric configuration to _dtk_metrics (informational)
+            if metric_file_path:
+                metrics_table_name = None
+                if self.project_config and hasattr(self.project_config, 'tables'):
+                    metrics_table_name = self.project_config.tables.metrics
+
+                self.internal.upsert_metric_config(
+                    metric_config=config,
+                    file_path=metric_file_path,
+                    table_name_override=metrics_table_name
+                )
+
+            # Step 0b: Acquire lock
             if not force:
                 # Default timeout: 1 hour (can be overridden via ProjectConfig in future)
                 timeout_seconds = 3600
{detectkit-0.2.4 → detectkit-0.2.6/detectkit.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: detectkit
-Version: 0.2.4
+Version: 0.2.6
 Summary: Metric monitoring with automatic anomaly detection
 Author: detectkit team
 License: MIT
{detectkit-0.2.4 → detectkit-0.2.6}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "detectkit"
-version = "0.2.4"
+version = "0.2.6"
 description = "Metric monitoring with automatic anomaly detection"
 readme = "README.md"
 requires-python = ">=3.10"