fh-saas 0.9.5__py3-none-any.whl

fh_saas/utils_polars_mapper.py ADDED
@@ -0,0 +1,134 @@
+ """High-performance JSON-to-database pipeline using Polars vectorized transformations."""
+
+ # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/09_utils_polars_mapper.ipynb.
+
+ # %% ../nbs/09_utils_polars_mapper.ipynb 2
+ from __future__ import annotations
+ import polars as pl
+ from sqlalchemy import create_engine, text
+ from typing import Dict, List, Optional
+ import uuid
+ import logging
+ from nbdev.showdoc import show_doc
+
+
+ logger = logging.getLogger(__name__)
+
+ # %% auto 0
+ __all__ = ['logger', 'map_and_upsert', 'apply_schema']
+
+ # %% ../nbs/09_utils_polars_mapper.ipynb 5
+ def map_and_upsert(
+     df: pl.DataFrame,  # The raw Polars DataFrame from JSON
+     table_name: str,  # Target database table name
+     key_col: str,  # Primary key column for conflict resolution
+     db_uri: str,  # SQLAlchemy connection string (e.g., 'sqlite:///db.db' or 'postgresql://...')
+     column_map: dict | None = None,  # Optional rename map {json_key: db_col}
+     unnest_cols: list[str] | None = None  # List of Struct columns to flatten
+ ):
+     """Map JSON data to database columns and upsert using a staging-table pattern."""
+     # Note: table_name and column names are interpolated directly into the SQL below,
+     # so they must come from trusted code, never from user input.
+
+     # Step 1: Rename columns if a mapping is provided
+     if column_map:
+         df = df.rename(column_map)
+         logger.info(f"Renamed columns: {column_map}")
+
+     # Step 2: Flatten nested columns if specified
+     if unnest_cols:
+         for col in unnest_cols:
+             if col in df.columns:
+                 df = df.unnest(col)
+                 logger.info(f"Unnested column: {col}")
+
+     # Step 3: Select only columns that exist in the target table (drop extras)
+     # This prevents errors from extra JSON fields
+     engine = create_engine(db_uri)
+
+     # Get the target table's columns
+     with engine.connect() as conn:
+         result = conn.execute(text(f"SELECT * FROM {table_name} LIMIT 0"))
+         target_columns = list(result.keys())
+
+     # Filter the DataFrame to only target columns
+     available_cols = [col for col in target_columns if col in df.columns]
+     df = df.select(available_cols)
+     logger.info(f"Selected columns for {table_name}: {available_cols}")
+
+     # Step 4: Generate a unique staging table name
+     staging_table = f"staging_{uuid.uuid4().hex[:8]}"
+
+     try:
+         # Step 5: Write to the staging table (fast bulk insert)
+         df.write_database(
+             table_name=staging_table,
+             connection=db_uri,
+             if_table_exists='replace'
+         )
+         logger.info(f"Wrote {len(df)} rows to staging table {staging_table}")
+
+         # Step 6: Determine database type for dialect-specific SQL
+         is_sqlite = 'sqlite' in db_uri.lower()
+
+         # Step 7: Execute the upsert from staging to target
+         with engine.connect() as conn:
+             if is_sqlite:
+                 # SQLite: INSERT OR REPLACE deletes and re-inserts the whole row on
+                 # conflict with any PRIMARY KEY/UNIQUE constraint (key_col is not
+                 # referenced explicitly here)
+                 cols_str = ', '.join(available_cols)
+                 upsert_sql = f"""
+                     INSERT OR REPLACE INTO {table_name} ({cols_str})
+                     SELECT {cols_str} FROM {staging_table}
+                 """
+             else:
+                 # PostgreSQL: INSERT ... ON CONFLICT DO UPDATE
+                 cols_str = ', '.join(available_cols)
+                 update_cols = [col for col in available_cols if col != key_col]
+                 update_set = ', '.join([f"{col} = EXCLUDED.{col}" for col in update_cols])
+
+                 upsert_sql = f"""
+                     INSERT INTO {table_name} ({cols_str})
+                     SELECT {cols_str} FROM {staging_table}
+                     ON CONFLICT ({key_col}) DO UPDATE SET {update_set}
+                 """
+
+             conn.execute(text(upsert_sql))
+             conn.commit()
+             logger.info(f"Upserted {len(df)} rows into {table_name}")
+
+     finally:
+         # Step 8: Cleanup - drop the staging table
+         with engine.connect() as conn:
+             conn.execute(text(f"DROP TABLE IF EXISTS {staging_table}"))
+             conn.commit()
+             logger.info(f"Dropped staging table {staging_table}")
+
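A minimal usage sketch of the staging-table flow above, assuming a `users` table with an `id` primary key already exists and that this module is exported as `fh_saas.utils_polars_mapper` (module path inferred from the notebook name):

```python
import polars as pl
from fh_saas.utils_polars_mapper import map_and_upsert  # module path assumed

# Raw JSON rows with a nested 'profile' struct and an extra field the table lacks
df = pl.DataFrame([
    {"userId": 1, "profile": {"name": "Ada", "city": "London"}, "extra": "dropped"},
    {"userId": 2, "profile": {"name": "Grace", "city": "Arlington"}, "extra": "dropped"},
])

map_and_upsert(
    df,
    table_name="users",            # must already exist with an 'id' primary key
    key_col="id",
    db_uri="sqlite:///example.db",
    column_map={"userId": "id"},   # rename JSON keys to DB column names
    unnest_cols=["profile"],       # flatten the struct into name/city columns
)
```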
+ # %% ../nbs/09_utils_polars_mapper.ipynb 8
+ def apply_schema(
+     df: pl.DataFrame,  # Input DataFrame
+     type_map: dict  # Column name -> Polars dtype (e.g., {'created_at': pl.Date, 'is_active': pl.Boolean})
+ ) -> pl.DataFrame:
+     """Apply explicit type conversions to DataFrame columns."""
+     conversions = []
+
+     for col_name, dtype in type_map.items():
+         if col_name not in df.columns:
+             logger.warning(f"Column {col_name} not found in DataFrame, skipping")
+             continue
+
+         # Handle different type conversions
+         if dtype == pl.Date:
+             conversions.append(pl.col(col_name).str.strptime(pl.Date, "%Y-%m-%d").alias(col_name))
+         elif dtype == pl.Datetime:
+             conversions.append(pl.col(col_name).str.strptime(pl.Datetime).alias(col_name))
+         elif dtype == pl.Boolean:
+             # Handle "true"/"false" strings (case-insensitive)
+             conversions.append(
+                 pl.col(col_name).str.to_lowercase().eq("true").alias(col_name)
+             )
+         else:
+             # Cast to the specified type (works for numeric types)
+             conversions.append(pl.col(col_name).cast(dtype).alias(col_name))
+
+     if conversions:
+         df = df.with_columns(conversions)
+         # Log the number actually converted, not the number requested
+         logger.info(f"Applied schema conversions to {len(conversions)} columns")
+
+     return df
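A quick sketch of the conversion rules, one column per branch of `apply_schema`:

```python
import polars as pl

df = pl.DataFrame({
    "created_at": ["2024-01-15", "2024-02-01"],
    "is_active": ["true", "False"],
    "score": ["10", "42"],
})

typed = apply_schema(df, {
    "created_at": pl.Date,    # parsed with "%Y-%m-%d"
    "is_active": pl.Boolean,  # case-insensitive comparison to "true"
    "score": pl.Int64,        # plain cast
    "missing": pl.Utf8,       # not in df: logged as a warning and skipped
})
print(typed.dtypes)  # [Date, Boolean, Int64]
```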
fh_saas/utils_seo.py ADDED
@@ -0,0 +1,230 @@
+ """Generate meta tags, sitemaps, and structured data for search engines."""
+
+ # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/12_utils_seo.ipynb.
+
+ # %% ../nbs/12_utils_seo.ipynb 2
+ from __future__ import annotations
+ from typing import List, Dict, Optional
+ from datetime import datetime
+ from html import escape
+ from nbdev.showdoc import show_doc
+
+ # %% auto 0
+ __all__ = ['generate_head_tags', 'generate_sitemap_xml', 'generate_rss_xml']
+
+ # %% ../nbs/12_utils_seo.ipynb 5
+ def generate_head_tags(
+     title: str,  # Page title
+     description: str,  # Page description (150-160 chars is optimal)
+     url: str,  # Canonical URL
+     image_url: Optional[str] = None,  # OpenGraph image URL
+     article_published: Optional[datetime] = None,  # Publication date
+     article_modified: Optional[datetime] = None,  # Last modified date
+     author: Optional[str] = None  # Author name
+ ) -> List[tuple]:
+     """
+     Generate meta tags for SEO.
+
+     Returns a list of (tag_name, attributes_dict) tuples for FastHTML components.
+     Includes standard, OpenGraph, and Twitter Card tags. Note that the 'title'
+     entry carries its text in the 'content' key and must be rendered as a Title
+     element, not a Meta or Link.
+
+     Example:
+     ```python
+     from fasthtml.common import *
+
+     tags = generate_head_tags(
+         title='My Blog Post',
+         description='Learn about Python',
+         url='https://example.com/blog/my-post',
+         image_url='https://example.com/image.jpg'
+     )
+
+     # Use in FastHTML app
+     @app.get('/blog/my-post')
+     def post():
+         return Html(
+             Head(
+                 *[Title(attrs['content']) if tag == 'title'
+                   else Meta(**attrs) if tag == 'meta'
+                   else Link(**attrs)
+                   for tag, attrs in tags]
+             ),
+             Body('...')
+         )
+     ```
+     """
+     tags = []
+
+     # Standard meta tags
+     tags.append(('title', {'content': title}))
+     tags.append(('meta', {'name': 'description', 'content': description}))
+
+     # Canonical URL
+     tags.append(('link', {'rel': 'canonical', 'href': url}))
+
+     # OpenGraph tags (Facebook, WhatsApp, etc.)
+     tags.extend([
+         ('meta', {'property': 'og:title', 'content': title}),
+         ('meta', {'property': 'og:description', 'content': description}),
+         ('meta', {'property': 'og:url', 'content': url}),
+         ('meta', {'property': 'og:type', 'content': 'article'})
+     ])
+
+     if image_url:
+         tags.append(('meta', {'property': 'og:image', 'content': image_url}))
+
+     # Article metadata
+     if article_published:
+         tags.append(('meta', {
+             'property': 'article:published_time',
+             'content': article_published.isoformat()
+         }))
+
+     if article_modified:
+         tags.append(('meta', {
+             'property': 'article:modified_time',
+             'content': article_modified.isoformat()
+         }))
+
+     if author:
+         tags.append(('meta', {'property': 'article:author', 'content': author}))
+
+     # Twitter Card tags
+     tags.extend([
+         ('meta', {'name': 'twitter:card', 'content': 'summary_large_image'}),
+         ('meta', {'name': 'twitter:title', 'content': title}),
+         ('meta', {'name': 'twitter:description', 'content': description})
+     ])
+
+     if image_url:
+         tags.append(('meta', {'name': 'twitter:image', 'content': image_url}))
+
+     return tags
+
+ # %% ../nbs/12_utils_seo.ipynb 8
+ def generate_sitemap_xml(
+     posts: List[Dict],  # List of posts from PostLoader
+     base_url: str,  # Base URL (e.g., 'https://example.com')
+     blog_path: str = '/blog'  # Blog path prefix
+ ) -> str:
+     """
+     Generate an XML sitemap for blog posts.
+
+     Returns a sitemap XML string with proper structure and lastmod dates.
+
+     Example:
+     ```python
+     from fasthtml.common import *
+     from fh_saas.utils_blog import PostLoader
+
+     app = FastHTML()
+     loader = PostLoader('blog/posts')
+
+     @app.get('/sitemap.xml')
+     def sitemap():
+         posts = loader.load_posts()
+         xml = generate_sitemap_xml(
+             posts=posts,
+             base_url='https://example.com',
+             blog_path='/blog'
+         )
+         return Response(xml, media_type='application/xml')
+     ```
+     """
+     xml_parts = [
+         '<?xml version="1.0" encoding="UTF-8"?>',
+         '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
+     ]
+
+     # Add the blog index
+     xml_parts.append('  <url>')
+     xml_parts.append(f'    <loc>{escape(base_url + blog_path)}</loc>')
+     xml_parts.append('    <changefreq>daily</changefreq>')
+     xml_parts.append('    <priority>1.0</priority>')
+     xml_parts.append('  </url>')
+
+     # Add each post
+     for post in posts:
+         url = f"{base_url}{blog_path}/{post['slug']}"
+         xml_parts.append('  <url>')
+         xml_parts.append(f'    <loc>{escape(url)}</loc>')
+
+         if post.get('date'):
+             lastmod = post['date'].strftime('%Y-%m-%d')
+             xml_parts.append(f'    <lastmod>{lastmod}</lastmod>')
+
+         xml_parts.append('    <changefreq>monthly</changefreq>')
+         xml_parts.append('    <priority>0.8</priority>')
+         xml_parts.append('  </url>')
+
+     xml_parts.append('</urlset>')
+     return '\n'.join(xml_parts)
+
+ # %% ../nbs/12_utils_seo.ipynb 11
+ def generate_rss_xml(
+     posts: List[Dict],  # List of posts from PostLoader
+     blog_title: str,  # Blog title
+     blog_description: str,  # Blog description
+     base_url: str,  # Base URL
+     blog_path: str = '/blog'  # Blog path prefix
+ ) -> str:
+     """
+     Generate an RSS 2.0 feed for blog posts.
+
+     Returns an RSS XML string for feed readers (Feedly, etc.).
+
+     Example:
+     ```python
+     from fasthtml.common import *
+     from fh_saas.utils_blog import PostLoader
+
+     app = FastHTML()
+     loader = PostLoader('blog/posts')
+
+     @app.get('/rss.xml')
+     def rss():
+         posts = loader.load_posts()[:20]  # Latest 20 posts
+         xml = generate_rss_xml(
+             posts=posts,
+             blog_title='My Blog',
+             blog_description='Tech tutorials and insights',
+             base_url='https://example.com'
+         )
+         return Response(xml, media_type='application/xml')
+     ```
+     """
+     from email.utils import formatdate
+
+     xml_parts = [
+         '<?xml version="1.0" encoding="UTF-8"?>',
+         '<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">',
+         '  <channel>',
+         f'    <title>{escape(blog_title)}</title>',
+         f'    <link>{escape(base_url + blog_path)}</link>',
+         f'    <description>{escape(blog_description)}</description>',
+         f'    <atom:link href="{escape(base_url)}/rss.xml" rel="self" type="application/rss+xml" />'
+     ]
+
+     # Add items
+     for post in posts:
+         url = f"{base_url}{blog_path}/{post['slug']}"
+
+         xml_parts.append('    <item>')
+         xml_parts.append(f'      <title>{escape(post["title"])}</title>')
+         xml_parts.append(f'      <link>{escape(url)}</link>')
+         xml_parts.append(f'      <guid>{escape(url)}</guid>')
+
+         if post.get('description'):
+             xml_parts.append(f'      <description>{escape(post["description"])}</description>')
+
+         if post.get('date'):
+             # Assumes post['date'] is a datetime; RSS requires an RFC 822 date
+             pub_date = formatdate(post['date'].timestamp(), usegmt=True)
+             xml_parts.append(f'      <pubDate>{pub_date}</pubDate>')
+
+         if post.get('author'):
+             # Note: the RSS 2.0 spec expects an email address in <author>
+             xml_parts.append(f'      <author>{escape(post["author"])}</author>')
+
+         xml_parts.append('    </item>')
+
+     xml_parts.append('  </channel>')
+     xml_parts.append('</rss>')
+
+     return '\n'.join(xml_parts)
fh_saas/utils_sql.py ADDED
@@ -0,0 +1,320 @@
1
+ """Comprehensive SQL helper library for database operations with multi-database support."""
2
+
3
+ # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/02_utils_sql.ipynb.
4
+
5
+ # %% auto 0
6
+ __all__ = ['logger', 'get_db_type', 'validate_params', 'run_id', 'insert_only', 'bulk_insert_only', 'upsert', 'bulk_upsert',
7
+ 'get_by_id', 'update_record', 'delete_record', 'bulk_delete', 'with_transaction', 'paginate_sql',
8
+ 'batch_execute', 'to_cents', 'from_cents']
9
+
10
+ # %% ../nbs/02_utils_sql.ipynb 2
11
+ from fastsql import *
12
+ from sqlalchemy import text
13
+ import os
14
+ import re
15
+ import logging
16
+ from typing import List, Dict, Any, Optional
17
+ from contextlib import contextmanager
18
+ from nbdev.showdoc import show_doc
19
+
20
+ # Module-level logger - configured by app via configure_logging()
21
+ logger = logging.getLogger(__name__)
22
+
23
+ # %% ../nbs/02_utils_sql.ipynb 5
24
+ def get_db_type():
25
+ """Get database type from environment variable"""
26
+ return os.getenv("DB_TYPE", "SQLITE").upper()
27
+
+ # %% ../nbs/02_utils_sql.ipynb 8
+ def _extract_params(sql: str) -> List[str]:
+     """Extract parameter names from a SQL string (e.g., :param_name).
+
+     Naive regex scan: it can over-match inside string literals or PostgreSQL
+     '::type' casts, so keep registry queries simple.
+     """
+     return re.findall(r':(\w+)', sql)
+
+ def validate_params(sql: str, params: Dict[str, Any]) -> None:
+     """Validate that all required parameters are provided."""
+     required = _extract_params(sql)
+     provided = set(params.keys()) if params else set()
+     missing = set(required) - provided
+
+     if missing:
+         raise ValueError(f"Missing required parameters: {', '.join(missing)}")
+
+ def run_id(db: Database, registry: Dict[str, str], query_id: str, params: Optional[Dict[str, Any]] = None) -> Any:
+     """Execute a query by ID from a query registry."""
+     # Get the query from the registry
+     if query_id not in registry:
+         raise ValueError(f"Query ID '{query_id}' not found in registry")
+
+     sql = registry[query_id]
+     params = params or {}
+
+     # Validate parameters before hitting the database
+     validate_params(sql, params)
+
+     # Execute the query
+     try:
+         result = db.conn.execute(text(sql), params)
+         return result
+     except Exception as e:
+         logger.error(f"Query '{query_id}' failed: {e}", exc_info=True)
+         raise Exception(f"Failed to execute query '{query_id}': {str(e)}") from e
+
+
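A small usage sketch of the registry pattern, assuming `db` is the fastsql `Database` whose `.conn` this module uses throughout (e.g., `db = Database('sqlite:///app.db')`; constructor form assumed) and a hypothetical `users` table:

```python
REGISTRY = {
    "user_by_email": "SELECT * FROM users WHERE email = :email",
    "recent_orders": "SELECT * FROM orders WHERE created_at > :since",
}

rows = run_id(db, REGISTRY, "user_by_email", {"email": "ada@example.com"}).fetchall()

# Missing parameters fail fast, before touching the database:
run_id(db, REGISTRY, "user_by_email", {})
# ValueError: Missing required parameters: email
```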
+ # %% ../nbs/02_utils_sql.ipynb 12
+ def insert_only(db: Database, table_name: str, record: Dict[str, Any], conflict_cols: List[str], auto_commit: bool = True) -> None:
+     """Insert a single record only if it doesn't exist (ignores conflicts)."""
+     db_type = get_db_type()
+     columns = list(record.keys())
+     placeholders = [f":{col}" for col in columns]
+
+     if db_type == "POSTGRESQL":
+         # ON CONFLICT DO NOTHING
+         conflict_clause = f"ON CONFLICT ({', '.join(conflict_cols)}) DO NOTHING"
+         sql = f"""
+             INSERT INTO {table_name} ({', '.join(columns)})
+             VALUES ({', '.join(placeholders)})
+             {conflict_clause}
+         """
+     else:  # SQLite
+         # INSERT OR IGNORE skips any constraint violation; conflict_cols is only
+         # used by the PostgreSQL branch
+         sql = f"""
+             INSERT OR IGNORE INTO {table_name} ({', '.join(columns)})
+             VALUES ({', '.join(placeholders)})
+         """
+
+     try:
+         db.conn.execute(text(sql), record)
+         if auto_commit:
+             db.conn.commit()
+     except Exception as e:
+         logger.error(f"Insert into '{table_name}' failed: {e}", exc_info=True)
+         if auto_commit:
+             db.conn.rollback()
+         raise Exception(f"Failed to insert record into {table_name}: {str(e)}") from e
+
+ def bulk_insert_only(db: Database, table_name: str, records: List[Dict[str, Any]], conflict_cols: List[str], auto_commit: bool = True) -> None:
+     """Insert multiple records, skipping conflicts (optimized batch operation)."""
+     if not records:
+         return
+
+     db_type = get_db_type()
+     columns = list(records[0].keys())
+     placeholders = [f":{col}" for col in columns]
+
+     if db_type == "POSTGRESQL":
+         conflict_clause = f"ON CONFLICT ({', '.join(conflict_cols)}) DO NOTHING"
+         sql = f"""
+             INSERT INTO {table_name} ({', '.join(columns)})
+             VALUES ({', '.join(placeholders)})
+             {conflict_clause}
+         """
+     else:  # SQLite
+         sql = f"""
+             INSERT OR IGNORE INTO {table_name} ({', '.join(columns)})
+             VALUES ({', '.join(placeholders)})
+         """
+
+     try:
+         # Passing the list of dicts lets SQLAlchemy use executemany, which is
+         # what makes this a batch operation rather than N separate round trips
+         db.conn.execute(text(sql), records)
+         if auto_commit:
+             db.conn.commit()
+     except Exception as e:
+         logger.error(f"Bulk insert into '{table_name}' failed: {e}", exc_info=True)
+         if auto_commit:
+             db.conn.rollback()
+         raise Exception(f"Failed to bulk insert into {table_name}: {str(e)}") from e
+
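A sketch of the ignore-on-conflict behavior, assuming a `users` table with a UNIQUE constraint on `email` and the `db` handle from the earlier sketch:

```python
insert_only(db, "users", {"email": "ada@example.com", "name": "Ada"},
            conflict_cols=["email"])

# Re-running is a no-op instead of raising an IntegrityError:
insert_only(db, "users", {"email": "ada@example.com", "name": "Ada Lovelace"},
            conflict_cols=["email"])

bulk_insert_only(db, "users", [
    {"email": "grace@example.com", "name": "Grace"},
    {"email": "ada@example.com", "name": "Ada"},  # duplicate email: skipped
], conflict_cols=["email"])
```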
+ # %% ../nbs/02_utils_sql.ipynb 16
+ def upsert(db: Database, table_name: str, record: Dict[str, Any],
+            conflict_cols: List[str], update_cols: Optional[List[str]] = None, auto_commit: bool = True) -> None:
+     """Insert a record or update it if it exists (upsert)."""
+     db_type = get_db_type()
+     columns = list(record.keys())
+     placeholders = [f":{col}" for col in columns]
+
+     # Determine which columns to update
+     if update_cols is None:
+         update_cols = [col for col in columns if col not in conflict_cols]
+
+     if db_type == "POSTGRESQL":
+         # ON CONFLICT DO UPDATE
+         update_set = ', '.join([f"{col} = EXCLUDED.{col}" for col in update_cols])
+         sql = f"""
+             INSERT INTO {table_name} ({', '.join(columns)})
+             VALUES ({', '.join(placeholders)})
+             ON CONFLICT ({', '.join(conflict_cols)})
+             DO UPDATE SET {update_set}
+         """
+     else:  # SQLite
+         # INSERT OR REPLACE deletes and re-inserts the whole row, so update_cols
+         # is not honored here; SQLite 3.24+ also supports ON CONFLICT DO UPDATE
+         # if the PostgreSQL semantics are needed
+         sql = f"""
+             INSERT OR REPLACE INTO {table_name} ({', '.join(columns)})
+             VALUES ({', '.join(placeholders)})
+         """
+
+     try:
+         db.conn.execute(text(sql), record)
+         if auto_commit:
+             db.conn.commit()
+     except Exception as e:
+         logger.error(f"Upsert into '{table_name}' failed: {e}", exc_info=True)
+         if auto_commit:
+             db.conn.rollback()
+         raise Exception(f"Failed to upsert record into {table_name}: {str(e)}") from e
+
+ def bulk_upsert(db: Database, table_name: str, records: List[Dict[str, Any]],
+                 conflict_cols: List[str], update_cols: Optional[List[str]] = None, auto_commit: bool = True) -> None:
+     """Insert or update multiple records (optimized batch operation)."""
+     if not records:
+         return
+
+     db_type = get_db_type()
+     columns = list(records[0].keys())
+     placeholders = [f":{col}" for col in columns]
+
+     # Determine which columns to update
+     if update_cols is None:
+         update_cols = [col for col in columns if col not in conflict_cols]
+
+     if db_type == "POSTGRESQL":
+         update_set = ', '.join([f"{col} = EXCLUDED.{col}" for col in update_cols])
+         sql = f"""
+             INSERT INTO {table_name} ({', '.join(columns)})
+             VALUES ({', '.join(placeholders)})
+             ON CONFLICT ({', '.join(conflict_cols)})
+             DO UPDATE SET {update_set}
+         """
+     else:  # SQLite
+         sql = f"""
+             INSERT OR REPLACE INTO {table_name} ({', '.join(columns)})
+             VALUES ({', '.join(placeholders)})
+         """
+
+     try:
+         # executemany: one statement, many parameter sets
+         db.conn.execute(text(sql), records)
+         if auto_commit:
+             db.conn.commit()
+     except Exception as e:
+         logger.error(f"Bulk upsert into '{table_name}' failed: {e}", exc_info=True)
+         if auto_commit:
+             db.conn.rollback()
+         raise Exception(f"Failed to bulk upsert into {table_name}: {str(e)}") from e
+
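A sketch of the intended call pattern (same assumed `users` table with a UNIQUE `email`); note that on SQLite the whole row is replaced, as flagged in the comment above:

```python
# First call inserts; the second updates the name for the existing email
upsert(db, "users", {"email": "ada@example.com", "name": "Ada"},
       conflict_cols=["email"])
upsert(db, "users", {"email": "ada@example.com", "name": "Ada Lovelace"},
       conflict_cols=["email"], update_cols=["name"])

bulk_upsert(db, "users", [
    {"email": "ada@example.com", "name": "Ada L."},
    {"email": "grace@example.com", "name": "Grace"},
], conflict_cols=["email"])
```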
+ # %% ../nbs/02_utils_sql.ipynb 20
+ def get_by_id(db: Database, table_name: str, id_value: Any, id_col: str = "id") -> Any:
+     """Get a single record by ID. Returns the SQLAlchemy Result; call .fetchone() or .mappings().first() on it."""
+     sql = f"SELECT * FROM {table_name} WHERE {id_col} = :id_value"
+     try:
+         result = db.conn.execute(text(sql), {"id_value": id_value})
+         return result
+     except Exception as e:
+         logger.error(f"Get by ID from '{table_name}' failed: {e}", exc_info=True)
+         raise Exception(f"Failed to get record from {table_name}: {str(e)}") from e
+
+ def update_record(db: Database, table_name: str, id_value: Any,
+                   id_col: str = "id", auto_commit: bool = True, **updates) -> None:
+     """Update a single record by ID."""
+     if not updates:
+         return
+
+     set_clause = ', '.join([f"{col} = :{col}" for col in updates.keys()])
+     sql = f"UPDATE {table_name} SET {set_clause} WHERE {id_col} = :id_value"
+
+     params = {**updates, "id_value": id_value}
+
+     try:
+         db.conn.execute(text(sql), params)
+         if auto_commit:
+             db.conn.commit()
+     except Exception as e:
+         logger.error(f"Update record in '{table_name}' failed: {e}", exc_info=True)
+         if auto_commit:
+             db.conn.rollback()
+         raise Exception(f"Failed to update record in {table_name}: {str(e)}") from e
+
+ def delete_record(db: Database, table_name: str, id_value: Any, id_col: str = "id", auto_commit: bool = True) -> None:
+     """Delete a single record by ID."""
+     sql = f"DELETE FROM {table_name} WHERE {id_col} = :id_value"
+
+     try:
+         db.conn.execute(text(sql), {"id_value": id_value})
+         if auto_commit:
+             db.conn.commit()
+     except Exception as e:
+         logger.error(f"Delete from '{table_name}' failed: {e}", exc_info=True)
+         if auto_commit:
+             db.conn.rollback()
+         raise Exception(f"Failed to delete record from {table_name}: {str(e)}") from e
+
+ def bulk_delete(db: Database, table_name: str, id_list: List[Any], id_col: str = "id", auto_commit: bool = True) -> None:
+     """Delete multiple records by ID list."""
+     if not id_list:
+         return
+
+     # Create placeholders for the IN clause
+     placeholders = ', '.join([f":id_{i}" for i in range(len(id_list))])
+     sql = f"DELETE FROM {table_name} WHERE {id_col} IN ({placeholders})"
+
+     # Build the params dict
+     params = {f"id_{i}": val for i, val in enumerate(id_list)}
+
+     try:
+         db.conn.execute(text(sql), params)
+         if auto_commit:
+             db.conn.commit()
+     except Exception as e:
+         logger.error(f"Bulk delete from '{table_name}' failed: {e}", exc_info=True)
+         if auto_commit:
+             db.conn.rollback()
+         raise Exception(f"Failed to bulk delete from {table_name}: {str(e)}") from e
+
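A round-trip sketch with the CRUD helpers (table and IDs are illustrative):

```python
update_record(db, "users", 1, name="Ada Lovelace", is_active=True)

row = get_by_id(db, "users", 1).mappings().first()  # dict-like row, or None
print(row["name"] if row else "not found")

delete_record(db, "users", 1)
bulk_delete(db, "users", [2, 3, 5])
```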
+ # %% ../nbs/02_utils_sql.ipynb 26
+ @contextmanager
+ def with_transaction(db: Database):
+     """Context manager for safe transaction handling with auto-rollback on error."""
+     try:
+         yield db
+         db.conn.commit()
+     except Exception:
+         db.conn.rollback()
+         raise  # bare raise preserves the original traceback
+
+ def paginate_sql(sql: str, page: int, page_size: int) -> str:
+     """Add LIMIT/OFFSET pagination to a SQL query (page is 1-based)."""
+     offset = (page - 1) * page_size
+     return f"{sql.rstrip(';')} LIMIT {page_size} OFFSET {offset}"
+
+ def batch_execute(db: Database, operation_func, items: List[Any], batch_size: int = 100) -> None:
+     """Execute an operation on items in batches, committing after each batch."""
+     for i in range(0, len(items), batch_size):
+         batch = items[i:i + batch_size]
+         try:
+             for item in batch:
+                 operation_func(db, item)
+             db.conn.commit()
+         except Exception as e:
+             logger.error(f"Batch execution failed at batch {i//batch_size + 1}: {e}", exc_info=True)
+             db.conn.rollback()
+             raise Exception(f"Batch execution failed at batch {i//batch_size + 1}: {str(e)}") from e
+
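A sketch combining these helpers with the ones above; `accounts` and `events` are hypothetical tables, and the key point is that `auto_commit=False` defers commit/rollback to the wrapper:

```python
# Atomic transfer: both upserts commit together or roll back together
with with_transaction(db):
    upsert(db, "accounts", {"id": 1, "balance": 500}, ["id"], auto_commit=False)
    upsert(db, "accounts", {"id": 2, "balance": -500}, ["id"], auto_commit=False)

# Page 2 of 25 rows each (pages are 1-based)
sql = paginate_sql("SELECT * FROM orders ORDER BY id;", page=2, page_size=25)
# -> "SELECT * FROM orders ORDER BY id LIMIT 25 OFFSET 25"

# Insert 250 events, committing after every batch of 100
event_records = [{"id": i, "kind": "click"} for i in range(250)]
batch_execute(db, lambda d, rec: insert_only(d, "events", rec, ["id"], auto_commit=False),
              items=event_records, batch_size=100)
```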
+ # %% ../nbs/02_utils_sql.ipynb 31
+ def to_cents(dollars: str | float | None) -> int | None:
+     """Convert a dollar amount to integer cents for database storage."""
+     if dollars is None or dollars == '':
+         return None
+     try:
+         # round() before int() guards against float error, e.g. 19.99 * 100 == 1998.9999...
+         return int(round(float(dollars) * 100))
+     except (ValueError, TypeError):
+         return None
+
+
+ def from_cents(cents: int | None) -> str:
+     """Convert integer cents to a formatted dollar string for display."""
+     if cents is None:
+         return "$0.00"
+     dollars = cents / 100
+     if dollars < 0:
+         return f"-${abs(dollars):,.2f}"
+     return f"${dollars:,.2f}"
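A few worked conversions, including the case the `round()` guard exists for:

```python
to_cents("19.99")    # 1999 -- without round(), 19.99 * 100 == 1998.999... truncates to 1998
to_cents(None)       # None
to_cents("not $")    # None (ValueError is swallowed)
from_cents(1999)     # '$19.99'
from_cents(-250000)  # '-$2,500.00'
from_cents(None)     # '$0.00'
```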