prismiq 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prismiq/__init__.py +543 -0
- prismiq/api.py +1889 -0
- prismiq/auth.py +108 -0
- prismiq/cache.py +527 -0
- prismiq/calculated_field_processor.py +231 -0
- prismiq/calculated_fields.py +819 -0
- prismiq/dashboard_store.py +1219 -0
- prismiq/dashboards.py +374 -0
- prismiq/dates.py +247 -0
- prismiq/engine.py +1315 -0
- prismiq/executor.py +345 -0
- prismiq/filter_merge.py +397 -0
- prismiq/formatting.py +298 -0
- prismiq/logging.py +489 -0
- prismiq/metrics.py +536 -0
- prismiq/middleware.py +346 -0
- prismiq/permissions.py +87 -0
- prismiq/persistence/__init__.py +45 -0
- prismiq/persistence/models.py +208 -0
- prismiq/persistence/postgres_store.py +1119 -0
- prismiq/persistence/saved_query_store.py +336 -0
- prismiq/persistence/schema.sql +95 -0
- prismiq/persistence/setup.py +222 -0
- prismiq/persistence/tables.py +76 -0
- prismiq/pins.py +72 -0
- prismiq/py.typed +0 -0
- prismiq/query.py +1233 -0
- prismiq/schema.py +333 -0
- prismiq/schema_config.py +354 -0
- prismiq/sql_utils.py +147 -0
- prismiq/sql_validator.py +219 -0
- prismiq/sqlalchemy_builder.py +577 -0
- prismiq/timeseries.py +410 -0
- prismiq/transforms.py +471 -0
- prismiq/trends.py +573 -0
- prismiq/types.py +688 -0
- prismiq-0.1.0.dist-info/METADATA +109 -0
- prismiq-0.1.0.dist-info/RECORD +39 -0
- prismiq-0.1.0.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,336 @@
|
|
|
1
|
+
"""PostgreSQL-backed saved query storage with tenant isolation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import builtins
|
|
6
|
+
import json
|
|
7
|
+
import logging
|
|
8
|
+
import uuid
|
|
9
|
+
from datetime import datetime, timezone
|
|
10
|
+
from typing import TYPE_CHECKING, Any
|
|
11
|
+
|
|
12
|
+
from sqlalchemy import (
|
|
13
|
+
Boolean,
|
|
14
|
+
Column,
|
|
15
|
+
MetaData,
|
|
16
|
+
String,
|
|
17
|
+
Table,
|
|
18
|
+
delete,
|
|
19
|
+
insert,
|
|
20
|
+
select,
|
|
21
|
+
update,
|
|
22
|
+
)
|
|
23
|
+
from sqlalchemy.dialects.postgresql import JSONB, TIMESTAMP, UUID
|
|
24
|
+
|
|
25
|
+
from prismiq.types import QueryDefinition, SavedQuery, SavedQueryCreate, SavedQueryUpdate
|
|
26
|
+
|
|
27
|
+
if TYPE_CHECKING:
|
|
28
|
+
from asyncpg import Pool # type: ignore[import-not-found]
|
|
29
|
+
|
|
30
|
+
_logger = logging.getLogger(__name__)
|
|
31
|
+
|
|
32
|
+
# SQLAlchemy Table definition for saved queries (used for query generation)
# quote=True ensures all identifiers are double-quoted in generated SQL
_metadata = MetaData()
_saved_queries_table = Table(
    "prismiq_saved_queries",
    _metadata,
    # Columns mirror the prismiq_saved_queries DDL in persistence/schema.sql;
    # keep the two definitions in sync.
    Column("id", UUID, primary_key=True, quote=True),
    Column("tenant_id", String(255), nullable=False, quote=True),
    Column("name", String(255), nullable=False, quote=True),
    Column("description", String, nullable=True, quote=True),
    Column("query", JSONB, nullable=False, quote=True),
    Column("owner_id", String(255), nullable=True, quote=True),
    Column("is_shared", Boolean, nullable=False, quote=True),
    Column("created_at", TIMESTAMP(timezone=True), nullable=False, quote=True),
    Column("updated_at", TIMESTAMP(timezone=True), nullable=False, quote=True),
    quote=True,
)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class SavedQueryStore:
    """PostgreSQL-backed saved query storage with tenant isolation.

    All operations are scoped to a tenant_id for multi-tenant security.
    Supports per-tenant PostgreSQL schema isolation via schema_name
    parameter.
    """

    def __init__(self, pool: Pool) -> None:
        """Initialize SavedQueryStore.

        Args:
            pool: asyncpg connection pool
        """
        # Connections are borrowed per-operation via ``self._pool.acquire()``.
        self._pool = pool

    async def _set_search_path(self, conn: Any, schema_name: str | None) -> None:
        """Set PostgreSQL search_path for schema isolation.

        Uses session-scoped set_config so the search_path persists across
        statements on the same connection.

        Args:
            conn: asyncpg connection
            schema_name: Schema name to use, or None for default (public)
        """
        # NOTE(review): is_local=false makes the setting session-scoped, so it
        # outlives this call on the pooled connection. This assumes the pool
        # resets session state on release — confirm the pool's reset behavior.
        if schema_name:
            # Build search_path value with safely quoted schema identifier
            # Double any embedded double-quotes to escape them in the identifier
            escaped_schema = schema_name.replace('"', '""')
            search_path_value = f'"{escaped_schema}", "public"'
            _logger.debug("[saved_query_store] Setting search_path to: %s", search_path_value)
            await conn.fetchval("SELECT set_config('search_path', $1, false)", search_path_value)
        else:
            _logger.debug('[saved_query_store] Setting search_path to: "public"')
            await conn.fetchval("SELECT set_config('search_path', $1, false)", '"public"')

    async def list(
        self,
        tenant_id: str,
        user_id: str | None = None,
        schema_name: str | None = None,
    ) -> builtins.list[SavedQuery]:
        """List saved queries for a tenant.

        Returns queries owned by the user or shared with all users. If
        user_id is None, returns all queries for the tenant.

        Args:
            tenant_id: Tenant ID for isolation.
            user_id: Optional user ID to filter by access.
            schema_name: PostgreSQL schema name for per-tenant schema isolation.
        """
        t = _saved_queries_table
        stmt = select(t).where(t.c.tenant_id == tenant_id)

        if user_id:
            # Return user's queries and shared queries
            # (owner_id IS NULL rows are treated as tenant-wide and included too)
            from sqlalchemy import or_

            stmt = stmt.where(
                or_(
                    t.c.owner_id == user_id,
                    t.c.is_shared.is_(True),
                    t.c.owner_id.is_(None),
                )
            )

        stmt = stmt.order_by(t.c.name.asc())

        sql, params = self._compile_query(stmt)
        async with self._pool.acquire() as conn:
            await self._set_search_path(conn, schema_name)
            rows = await conn.fetch(sql, *params)
            return [self._row_to_saved_query(row) for row in rows]

    async def get(
        self,
        query_id: str,
        tenant_id: str,
        schema_name: str | None = None,
    ) -> SavedQuery | None:
        """Get a saved query by ID with tenant check.

        Args:
            query_id: The saved query ID.
            tenant_id: Tenant ID for isolation.
            schema_name: PostgreSQL schema name for per-tenant schema isolation.
        """
        t = _saved_queries_table
        # uuid.UUID() raises ValueError for malformed ids before any SQL runs.
        stmt = select(t).where(
            t.c.id == uuid.UUID(query_id),
            t.c.tenant_id == tenant_id,
        )

        sql, params = self._compile_query(stmt)
        async with self._pool.acquire() as conn:
            await self._set_search_path(conn, schema_name)
            row = await conn.fetchrow(sql, *params)
            if not row:
                return None
            return self._row_to_saved_query(row)

    async def create(
        self,
        data: SavedQueryCreate,
        tenant_id: str,
        owner_id: str | None = None,
        schema_name: str | None = None,
    ) -> SavedQuery:
        """Create a new saved query.

        Args:
            data: Saved query creation data.
            tenant_id: Tenant ID for isolation.
            owner_id: Optional owner ID.
            schema_name: PostgreSQL schema name for per-tenant schema isolation.
        """
        # ID and timestamps are generated here (not by DB defaults) so the
        # RETURNING row matches exactly what was written.
        query_id = uuid.uuid4()
        now = datetime.now(timezone.utc)

        t = _saved_queries_table
        stmt = (
            insert(t)
            .values(
                id=query_id,
                tenant_id=tenant_id,
                name=data.name,
                description=data.description,
                # Query definition is serialized to JSON text for the JSONB column.
                query=json.dumps(data.query.model_dump()),
                owner_id=owner_id,
                is_shared=data.is_shared,
                created_at=now,
                updated_at=now,
            )
            .returning(*t.c)
        )

        sql, params = self._compile_query(stmt)
        async with self._pool.acquire() as conn:
            await self._set_search_path(conn, schema_name)
            row = await conn.fetchrow(sql, *params)
            return self._row_to_saved_query(row)

    async def update(
        self,
        query_id: str,
        data: SavedQueryUpdate,
        tenant_id: str,
        user_id: str | None = None,
        schema_name: str | None = None,
    ) -> SavedQuery | None:
        """Update a saved query.

        Only the owner can update a query.

        Args:
            query_id: The saved query ID to update.
            data: Update data.
            tenant_id: Tenant ID for isolation.
            user_id: User ID for ownership check.
            schema_name: PostgreSQL schema name for per-tenant schema isolation.
        """
        # Collect fields to update
        # NOTE(review): ``None`` means "field not provided", so a field can
        # never be cleared back to NULL through this method — confirm that is
        # the intended SavedQueryUpdate contract.
        values: dict[str, Any] = {}

        if data.name is not None:
            values["name"] = data.name

        if data.description is not None:
            values["description"] = data.description

        if data.query is not None:
            values["query"] = json.dumps(data.query.model_dump())

        if data.is_shared is not None:
            values["is_shared"] = data.is_shared

        if not values:
            # No updates provided, just return current query
            return await self.get(query_id, tenant_id, schema_name)

        # Always update the timestamp
        values["updated_at"] = datetime.now(timezone.utc)

        t = _saved_queries_table
        stmt = (
            update(t)
            .where(
                t.c.id == uuid.UUID(query_id),
                t.c.tenant_id == tenant_id,
            )
            .values(**values)
            .returning(*t.c)
        )

        # Only owner can update
        # (when user_id is None the ownership predicate is skipped entirely)
        if user_id:
            stmt = stmt.where(t.c.owner_id == user_id)

        sql, params = self._compile_query(stmt)
        async with self._pool.acquire() as conn:
            await self._set_search_path(conn, schema_name)
            row = await conn.fetchrow(sql, *params)
            if not row:
                # Either no such row in this tenant, or the caller is not the owner.
                return None
            return self._row_to_saved_query(row)

    async def delete(
        self,
        query_id: str,
        tenant_id: str,
        user_id: str | None = None,
        schema_name: str | None = None,
    ) -> bool:
        """Delete a saved query.

        Only the owner can delete a query.

        Args:
            query_id: The saved query ID to delete.
            tenant_id: Tenant ID for isolation.
            user_id: User ID for ownership check.
            schema_name: PostgreSQL schema name for per-tenant schema isolation.
        """
        t = _saved_queries_table
        stmt = delete(t).where(
            t.c.id == uuid.UUID(query_id),
            t.c.tenant_id == tenant_id,
        )

        if user_id:
            stmt = stmt.where(t.c.owner_id == user_id)

        sql, params = self._compile_query(stmt)
        async with self._pool.acquire() as conn:
            await self._set_search_path(conn, schema_name)
            result = await conn.execute(sql, *params)
            # asyncpg returns the command tag, e.g. "DELETE 1" when a row was removed.
            return result == "DELETE 1"

    def _row_to_saved_query(self, row: Any) -> SavedQuery:
        """Convert a database row to a SavedQuery model."""
        query_data = row["query"]
        if isinstance(query_data, str):
            # JSONB may arrive as text depending on connection codec configuration.
            query_data = json.loads(query_data)

        return SavedQuery(
            id=str(row["id"]),
            name=row["name"],
            description=row.get("description"),
            query=QueryDefinition(**query_data),
            tenant_id=row["tenant_id"],
            owner_id=row.get("owner_id"),
            is_shared=row.get("is_shared", False),
            # Timestamps are exposed as ISO-8601 strings, or None when absent.
            created_at=row["created_at"].isoformat() if row.get("created_at") else None,
            updated_at=row["updated_at"].isoformat() if row.get("updated_at") else None,
        )

    @staticmethod
    def _compile_query(stmt: Any) -> tuple[str, builtins.list[Any]]:
        """Compile a SQLAlchemy statement for asyncpg execution.

        Converts SQLAlchemy Core statements to SQL strings with positional
        parameters ($1, $2, etc.) compatible with asyncpg.

        Args:
            stmt: SQLAlchemy Core statement (select, insert, etc.)

        Returns:
            Tuple of (sql_string, list_of_parameters)
        """
        from sqlalchemy.dialects import postgresql

        # NOTE(review): SQLAlchemy's plain "numeric" paramstyle renders
        # ":1"-style placeholders, while asyncpg expects "$1" (SQLAlchemy 2.0
        # calls that "numeric_dollar") — verify the generated SQL actually
        # carries "$n" placeholders before shipping.
        dialect = postgresql.dialect(paramstyle="numeric")
        compiled = stmt.compile(dialect=dialect, compile_kwargs={"literal_binds": False})
        sql = str(compiled)

        # Extract parameters in the order they appear in the SQL
        # The compiled.positiontup gives param names in order for positional dialects
        if hasattr(compiled, "positiontup") and compiled.positiontup:
            params = [compiled.params[name] for name in compiled.positiontup]
        else:
            # Fallback: params dict should be ordered in Python 3.7+
            params = list(compiled.params.values())

        return sql, params
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
-- Prismiq metadata tables
-- Created in customer's database alongside their data tables
-- NOTE(review): gen_random_uuid() is built in on PostgreSQL 13+; earlier
-- versions require the pgcrypto extension — confirm the minimum supported
-- server version.

-- Dashboards
CREATE TABLE IF NOT EXISTS prismiq_dashboards (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id VARCHAR(255) NOT NULL,
    name VARCHAR(255) NOT NULL,
    description TEXT,
    layout JSONB NOT NULL DEFAULT '{"columns": 12, "rowHeight": 50, "margin": [10, 10]}',
    filters JSONB NOT NULL DEFAULT '[]',
    owner_id VARCHAR(255),
    is_public BOOLEAN NOT NULL DEFAULT FALSE,
    allowed_viewers TEXT[] NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Names are unique per tenant, not globally
    CONSTRAINT unique_dashboard_name_per_tenant UNIQUE (tenant_id, name)
);

CREATE INDEX IF NOT EXISTS idx_dashboards_tenant_id ON prismiq_dashboards(tenant_id);
CREATE INDEX IF NOT EXISTS idx_dashboards_owner_id ON prismiq_dashboards(tenant_id, owner_id);

-- Widgets
CREATE TABLE IF NOT EXISTS prismiq_widgets (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- Widgets are owned by a dashboard and removed with it
    dashboard_id UUID NOT NULL REFERENCES prismiq_dashboards(id) ON DELETE CASCADE,
    type VARCHAR(50) NOT NULL,
    title VARCHAR(255) NOT NULL,
    query JSONB, -- Null for text widgets
    position JSONB NOT NULL, -- {x, y, w, h}
    config JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_widgets_dashboard_id ON prismiq_widgets(dashboard_id);

-- Saved queries for reuse
-- Mirrored by the SQLAlchemy Table in persistence/saved_query_store.py;
-- keep column definitions in sync.
CREATE TABLE IF NOT EXISTS prismiq_saved_queries (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id VARCHAR(255) NOT NULL,
    name VARCHAR(255) NOT NULL,
    description TEXT,
    query JSONB NOT NULL,
    owner_id VARCHAR(255),
    is_shared BOOLEAN NOT NULL DEFAULT FALSE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    CONSTRAINT unique_query_name_per_tenant UNIQUE (tenant_id, name)
);

CREATE INDEX IF NOT EXISTS idx_saved_queries_tenant ON prismiq_saved_queries(tenant_id);

-- Pinned dashboards for context-based quick access
CREATE TABLE IF NOT EXISTS prismiq_pinned_dashboards (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    tenant_id VARCHAR(255) NOT NULL,
    user_id VARCHAR(255) NOT NULL,
    dashboard_id UUID NOT NULL REFERENCES prismiq_dashboards(id) ON DELETE CASCADE,
    context VARCHAR(100) NOT NULL,
    position INTEGER NOT NULL DEFAULT 0,
    pinned_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- A user may pin a given dashboard at most once per context
    CONSTRAINT unique_pin_per_context UNIQUE (tenant_id, user_id, dashboard_id, context)
);

CREATE INDEX IF NOT EXISTS idx_pinned_tenant_user_context ON prismiq_pinned_dashboards(tenant_id, user_id, context);
CREATE INDEX IF NOT EXISTS idx_pinned_dashboard ON prismiq_pinned_dashboards(dashboard_id);

-- Function to update updated_at timestamp
CREATE OR REPLACE FUNCTION prismiq_update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Triggers for auto-updating timestamps
-- (DROP before CREATE keeps the script idempotent on PostgreSQL versions
-- without CREATE OR REPLACE TRIGGER)
DROP TRIGGER IF EXISTS prismiq_dashboards_updated ON prismiq_dashboards;
CREATE TRIGGER prismiq_dashboards_updated
    BEFORE UPDATE ON prismiq_dashboards
    FOR EACH ROW EXECUTE FUNCTION prismiq_update_timestamp();

DROP TRIGGER IF EXISTS prismiq_widgets_updated ON prismiq_widgets;
CREATE TRIGGER prismiq_widgets_updated
    BEFORE UPDATE ON prismiq_widgets
    FOR EACH ROW EXECUTE FUNCTION prismiq_update_timestamp();

DROP TRIGGER IF EXISTS prismiq_saved_queries_updated ON prismiq_saved_queries;
CREATE TRIGGER prismiq_saved_queries_updated
    BEFORE UPDATE ON prismiq_saved_queries
    FOR EACH ROW EXECUTE FUNCTION prismiq_update_timestamp();
|
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
"""Database setup utilities for Prismiq tables."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import re
|
|
7
|
+
from functools import cache
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import TYPE_CHECKING
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from asyncpg import Pool # type: ignore[import-not-found]
|
|
13
|
+
from sqlalchemy import Connection
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
# Valid schema name pattern: starts with letter/underscore, alphanumeric thereafter
|
|
18
|
+
_SCHEMA_NAME_PATTERN = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")
|
|
19
|
+
|
|
20
|
+
# Reserved PostgreSQL schemas that should not be used for tenant data
|
|
21
|
+
_RESERVED_SCHEMAS = frozenset({"public", "information_schema", "pg_catalog", "pg_toast"})
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class TableCreationError(Exception):
|
|
25
|
+
"""Raised when Prismiq tables cannot be created."""
|
|
26
|
+
|
|
27
|
+
pass
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _validate_schema_name(schema_name: str) -> None:
|
|
31
|
+
"""Validate schema name for safety and correctness.
|
|
32
|
+
|
|
33
|
+
Args:
|
|
34
|
+
schema_name: PostgreSQL schema name to validate
|
|
35
|
+
|
|
36
|
+
Raises:
|
|
37
|
+
ValueError: If schema name is invalid or reserved
|
|
38
|
+
"""
|
|
39
|
+
if not schema_name:
|
|
40
|
+
raise ValueError("schema_name cannot be empty. Use None for default schema.")
|
|
41
|
+
|
|
42
|
+
if not _SCHEMA_NAME_PATTERN.match(schema_name):
|
|
43
|
+
raise ValueError(
|
|
44
|
+
f"Invalid schema name '{schema_name}'. Schema names must start with "
|
|
45
|
+
f"a letter or underscore and contain only alphanumeric characters."
|
|
46
|
+
)
|
|
47
|
+
|
|
48
|
+
if schema_name.lower() in _RESERVED_SCHEMAS:
|
|
49
|
+
raise ValueError(
|
|
50
|
+
f"Cannot use reserved schema '{schema_name}'. "
|
|
51
|
+
f"Use a tenant-specific schema name instead."
|
|
52
|
+
)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@cache
|
|
56
|
+
def _get_schema_sql() -> str:
|
|
57
|
+
"""Load SQL from adjacent schema.sql file (lazy, cached).
|
|
58
|
+
|
|
59
|
+
Raises:
|
|
60
|
+
RuntimeError: If schema.sql file is missing (corrupted installation)
|
|
61
|
+
"""
|
|
62
|
+
schema_path = Path(__file__).parent / "schema.sql"
|
|
63
|
+
try:
|
|
64
|
+
return schema_path.read_text()
|
|
65
|
+
except FileNotFoundError:
|
|
66
|
+
raise RuntimeError(
|
|
67
|
+
f"Prismiq schema.sql not found at {schema_path}. "
|
|
68
|
+
f"This indicates a corrupted package installation. "
|
|
69
|
+
f"Try reinstalling: pip install --force-reinstall prismiq"
|
|
70
|
+
) from None
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
async def ensure_tables(pool: Pool) -> None:
    """Create Prismiq metadata tables if they don't exist.

    Idempotent — the bundled DDL uses CREATE TABLE IF NOT EXISTS throughout,
    so calling this on every startup is safe. Tables land in whatever schema
    the connection's current search_path points at; for per-tenant schema
    isolation use ensure_tables_sync() with SQLAlchemy, or set search_path
    before calling.

    Args:
        pool: asyncpg connection pool

    Raises:
        TableCreationError: If table creation fails
    """
    try:
        async with pool.acquire() as conn:
            # Execute the whole bundled schema.sql script in one call.
            await conn.execute(_get_schema_sql())
        logger.info("Prismiq tables created/verified successfully")
    except Exception as exc:
        reason = str(exc)
        logger.error(f"Failed to create Prismiq tables: {reason}")
        raise TableCreationError(
            f"Failed to create Prismiq tables. "
            f"Check database permissions and connectivity. Original error: {reason}"
        ) from exc
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
async def drop_tables(pool: Pool) -> None:
    """Drop every Prismiq metadata table and the shared trigger function.

    WARNING: destroys all dashboard, widget, saved-query, and pin data.
    Use with caution — intended primarily for test teardown.

    Args:
        pool: asyncpg connection pool
    """
    # Dependent tables first; CASCADE sweeps up anything still attached.
    async with pool.acquire() as connection:
        await connection.execute(
            """
            DROP TABLE IF EXISTS prismiq_pinned_dashboards CASCADE;
            DROP TABLE IF EXISTS prismiq_widgets CASCADE;
            DROP TABLE IF EXISTS prismiq_dashboards CASCADE;
            DROP TABLE IF EXISTS prismiq_saved_queries CASCADE;
            DROP FUNCTION IF EXISTS prismiq_update_timestamp CASCADE;
            """
        )
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
async def table_exists(pool: Pool, table_name: str) -> bool:
    """Check if a Prismiq table exists.

    Args:
        pool: asyncpg connection pool
        table_name: Name of the table to check

    Returns:
        True if table exists, False otherwise
    """
    # NOTE(review): the lookup is not filtered by table_schema, so a table
    # with this name in ANY schema visible to the role counts as existing —
    # confirm that is intended for per-tenant schema setups.
    async with pool.acquire() as conn:
        result = await conn.fetchval(
            """
            SELECT EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_name = $1
            )
            """,
            table_name,
        )
        return bool(result)
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
# NOTE: Synchronous I/O exception for Alembic/migration use.
# Alembic migrations are inherently synchronous, so this helper intentionally
# uses sync I/O. This is the accepted exception to the async-only guideline.
# For async application code, use ensure_tables() instead.
def ensure_tables_sync(connection: Connection, schema_name: str | None = None) -> None:
    """Create Prismiq tables in specified schema (synchronous).

    For use with SQLAlchemy sync engines in multi-tenant systems. This function
    creates all Prismiq tables using the declarative models from models.py.

    This is the recommended approach for:
    - Alembic migrations with schema-based multi-tenancy
    - Programmatic table creation during tenant provisioning
    - Integration with existing SQLAlchemy-based applications

    Args:
        connection: SQLAlchemy sync connection
        schema_name: PostgreSQL schema name (e.g., "tenant_123"). If provided,
            tables are created in this schema using schema_translate_map.
            If None, tables are created in the default schema.

    Raises:
        ValueError: If schema_name is invalid or uses a reserved schema
        TableCreationError: If table creation fails

    Example:
        from sqlalchemy import create_engine
        from prismiq import ensure_tables_sync

        engine = create_engine("postgresql://user:pass@localhost/db")

        # Create in default schema
        with engine.connect() as conn:
            ensure_tables_sync(conn)
            conn.commit()

        # Create in tenant-specific schema
        with engine.connect() as conn:
            ensure_tables_sync(conn, schema_name="tenant_123")
            conn.commit()
    """
    # Imported lazily to avoid importing SQLAlchemy ORM models at module load.
    from prismiq.persistence.models import PrismiqBase

    # Human-readable label used only in log / error messages.
    target_schema = schema_name or "default schema"

    # Validate schema name if provided
    # (raises ValueError before anything touches the database)
    if schema_name is not None:
        _validate_schema_name(schema_name)

    try:
        if schema_name:
            # Use schema_translate_map for multi-tenant support:
            # the models' schema-less tables are rerouted into schema_name.
            connection = connection.execution_options(schema_translate_map={None: schema_name})

        logger.info(f"Creating Prismiq tables in {target_schema}")
        PrismiqBase.metadata.create_all(connection)
        logger.info(f"Prismiq tables created/verified in {target_schema}")

    except ValueError:
        # Re-raise validation errors as-is
        raise
    except Exception as e:
        error_msg = str(e)
        logger.error(f"Failed to create Prismiq tables in {target_schema}: {error_msg}")

        # Provide actionable error messages for common issues
        # NOTE(review): this matches on the error text, which is brittle
        # across PostgreSQL locales/versions — acceptable as best-effort.
        if "schema" in error_msg.lower() and "does not exist" in error_msg.lower():
            raise TableCreationError(
                f"Schema '{schema_name}' does not exist. "
                f"Create the schema before calling ensure_tables_sync()."
            ) from e

        raise TableCreationError(
            f"Failed to create Prismiq tables in {target_schema}. "
            f"Check database permissions and schema existence. "
            f"Original error: {error_msg}"
        ) from e
|