bizon 0.1.1__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. bizon/alerting/alerts.py +0 -1
  2. bizon/common/models.py +184 -4
  3. bizon/connectors/destinations/bigquery/src/config.py +1 -1
  4. bizon/connectors/destinations/bigquery/src/destination.py +14 -9
  5. bizon/connectors/destinations/bigquery_streaming/config/bigquery_streaming.example.yml +74 -0
  6. bizon/connectors/destinations/bigquery_streaming/src/config.py +6 -5
  7. bizon/connectors/destinations/bigquery_streaming/src/destination.py +13 -9
  8. bizon/connectors/destinations/bigquery_streaming_v2/config/bigquery_streaming_v2.example.yml +79 -0
  9. bizon/connectors/destinations/bigquery_streaming_v2/src/config.py +6 -1
  10. bizon/connectors/destinations/bigquery_streaming_v2/src/destination.py +232 -49
  11. bizon/connectors/destinations/bigquery_streaming_v2/src/proto_utils.py +1 -13
  12. bizon/connectors/destinations/file/config/file.example.yml +40 -0
  13. bizon/connectors/destinations/file/src/config.py +2 -1
  14. bizon/connectors/destinations/file/src/destination.py +3 -6
  15. bizon/connectors/destinations/logger/config/logger.example.yml +30 -0
  16. bizon/connectors/destinations/logger/src/config.py +1 -2
  17. bizon/connectors/destinations/logger/src/destination.py +4 -2
  18. bizon/connectors/sources/cycle/src/source.py +2 -6
  19. bizon/connectors/sources/dummy/src/source.py +0 -4
  20. bizon/connectors/sources/gsheets/src/source.py +2 -3
  21. bizon/connectors/sources/hubspot/src/hubspot_base.py +0 -1
  22. bizon/connectors/sources/hubspot/src/hubspot_objects.py +3 -4
  23. bizon/connectors/sources/hubspot/src/models/hs_object.py +0 -1
  24. bizon/connectors/sources/kafka/config/kafka.example.yml +1 -3
  25. bizon/connectors/sources/kafka/config/kafka_debezium.example.yml +1 -3
  26. bizon/connectors/sources/kafka/config/kafka_streams.example.yml +124 -0
  27. bizon/connectors/sources/kafka/src/config.py +10 -12
  28. bizon/connectors/sources/kafka/src/decode.py +65 -60
  29. bizon/connectors/sources/kafka/src/source.py +182 -61
  30. bizon/connectors/sources/kafka/tests/kafka_pipeline.py +1 -1
  31. bizon/connectors/sources/notion/config/api_key.example.yml +35 -0
  32. bizon/connectors/sources/notion/src/__init__.py +0 -0
  33. bizon/connectors/sources/notion/src/config.py +59 -0
  34. bizon/connectors/sources/notion/src/source.py +1159 -0
  35. bizon/connectors/sources/notion/tests/notion_pipeline.py +7 -0
  36. bizon/connectors/sources/notion/tests/test_notion.py +113 -0
  37. bizon/connectors/sources/periscope/src/source.py +0 -6
  38. bizon/connectors/sources/pokeapi/src/source.py +0 -1
  39. bizon/connectors/sources/sana_ai/config/sana.example.yml +25 -0
  40. bizon/connectors/sources/sana_ai/src/source.py +85 -0
  41. bizon/destination/buffer.py +0 -1
  42. bizon/destination/config.py +9 -1
  43. bizon/destination/destination.py +38 -9
  44. bizon/engine/backend/adapters/sqlalchemy/backend.py +2 -5
  45. bizon/engine/backend/adapters/sqlalchemy/config.py +0 -1
  46. bizon/engine/config.py +0 -1
  47. bizon/engine/engine.py +0 -1
  48. bizon/engine/pipeline/consumer.py +0 -1
  49. bizon/engine/pipeline/producer.py +1 -5
  50. bizon/engine/queue/adapters/kafka/config.py +1 -1
  51. bizon/engine/queue/adapters/kafka/queue.py +0 -1
  52. bizon/engine/queue/adapters/python_queue/consumer.py +0 -1
  53. bizon/engine/queue/adapters/python_queue/queue.py +0 -2
  54. bizon/engine/queue/adapters/rabbitmq/consumer.py +0 -1
  55. bizon/engine/queue/adapters/rabbitmq/queue.py +0 -1
  56. bizon/engine/queue/config.py +0 -2
  57. bizon/engine/runner/adapters/process.py +0 -2
  58. bizon/engine/runner/adapters/streaming.py +114 -42
  59. bizon/engine/runner/adapters/thread.py +0 -2
  60. bizon/engine/runner/config.py +0 -1
  61. bizon/engine/runner/runner.py +14 -9
  62. bizon/monitoring/config.py +12 -2
  63. bizon/monitoring/datadog/monitor.py +100 -14
  64. bizon/monitoring/monitor.py +41 -12
  65. bizon/monitoring/noop/monitor.py +22 -3
  66. bizon/source/auth/authenticators/abstract_oauth.py +11 -3
  67. bizon/source/auth/authenticators/abstract_token.py +2 -1
  68. bizon/source/auth/authenticators/basic.py +1 -1
  69. bizon/source/auth/authenticators/cookies.py +2 -1
  70. bizon/source/auth/authenticators/oauth.py +8 -3
  71. bizon/source/config.py +0 -2
  72. bizon/source/cursor.py +8 -16
  73. bizon/source/discover.py +3 -6
  74. bizon/source/models.py +0 -1
  75. bizon/source/session.py +0 -1
  76. bizon/source/source.py +18 -3
  77. bizon/transform/config.py +0 -2
  78. bizon/transform/transform.py +0 -3
  79. {bizon-0.1.1.dist-info → bizon-0.2.0.dist-info}/METADATA +62 -41
  80. bizon-0.2.0.dist-info/RECORD +136 -0
  81. {bizon-0.1.1.dist-info → bizon-0.2.0.dist-info}/WHEEL +1 -1
  82. bizon-0.2.0.dist-info/entry_points.txt +2 -0
  83. bizon-0.1.1.dist-info/RECORD +0 -123
  84. bizon-0.1.1.dist-info/entry_points.txt +0 -3
  85. {bizon-0.1.1.dist-info → bizon-0.2.0.dist-info/licenses}/LICENSE +0 -0
bizon/alerting/alerts.py CHANGED
@@ -7,7 +7,6 @@ from bizon.alerting.models import AlertingConfig, AlertMethod, LogLevel
 
 
 class AbstractAlert(ABC):
-
     def __init__(self, type: AlertMethod, config: AlertingConfig, log_levels: List[LogLevel] = [LogLevel.ERROR]):
         self.type = type
         self.config = config
bizon/common/models.py CHANGED
@@ -1,9 +1,12 @@
-from typing import Optional, Union
+from typing import Any, Optional, Union
 
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
 
 from bizon.alerting.models import AlertingConfig
-from bizon.connectors.destinations.bigquery.src.config import BigQueryConfig
+from bizon.connectors.destinations.bigquery.src.config import (
+    BigQueryColumn,
+    BigQueryConfig,
+)
 from bizon.connectors.destinations.bigquery_streaming.src.config import (
     BigQueryStreamingConfig,
 )
@@ -18,8 +21,74 @@ from bizon.source.config import SourceConfig, SourceSyncModes
 from bizon.transform.config import TransformModel
 
 
-class BizonConfig(BaseModel):
+class StreamSourceConfig(BaseModel):
+    """Source-specific stream routing configuration.
+
+    Uses extra='allow' to support source-specific fields like:
+    - topic (Kafka)
+    - endpoint (API sources)
+    - channel (other streaming sources)
+    """
+
+    model_config = ConfigDict(extra="allow")
+
+    # Common field for stream identifier
+    name: Optional[str] = Field(None, description="Stream identifier within the source")
+
+    # Kafka-specific
+    topic: Optional[str] = Field(None, description="Kafka topic name")
+
+    # API-specific
+    endpoint: Optional[str] = Field(None, description="API endpoint path")
+
+
+class StreamDestinationConfig(BaseModel):
+    """Destination configuration for a stream.
+
+    Supports destination-specific schema definitions and options.
+    Uses extra='allow' to support destination-specific overrides.
+    """
 
+    model_config = ConfigDict(extra="allow")
+
+    # Universal destination identifier
+    table_id: str = Field(..., description="Full destination identifier (e.g., project.dataset.table)")
+
+    # BigQuery-specific schema (can be extended for other destinations)
+    record_schema: Optional[list[BigQueryColumn]] = Field(None, description="Schema for the destination records")
+    clustering_keys: Optional[list[str]] = Field(None, description="Clustering keys for the destination table")
+
+
+class StreamConfig(BaseModel):
+    """Configuration for a single stream.
+
+    Consolidates source stream routing and destination configuration in one place,
+    eliminating duplication of destination_id between source and destination configs.
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    name: str = Field(..., description="Logical name for this stream")
+    source: StreamSourceConfig = Field(..., description="Source-specific routing configuration")
+    destination: StreamDestinationConfig = Field(
+        ..., description="Destination configuration including table and schema"
+    )
+
+    @field_validator("destination")
+    @classmethod
+    def validate_table_id_format(cls, v: StreamDestinationConfig) -> StreamDestinationConfig:
+        """Ensure table_id follows expected format for BigQuery-like destinations."""
+        if v.table_id:
+            parts = v.table_id.split(".")
+            if len(parts) != 3:
+                raise ValueError(
+                    f"table_id must be in format 'project.dataset.table', got: {v.table_id}. "
+                    f"Found {len(parts)} parts instead of 3."
+                )
+        return v
+
+
+class BizonConfig(BaseModel):
     # Forbid extra keys in the model
     model_config = ConfigDict(extra="forbid")
 
@@ -63,6 +132,115 @@ class BizonConfig(BaseModel):
         default=None,
     )
 
+    streams: Optional[list[StreamConfig]] = Field(
+        None,
+        description="Stream routing configuration (opt-in for multi-table streaming). "
+        "Consolidates source stream definitions with destination tables and schemas.",
+    )
+
+    @field_validator("streams")
+    @classmethod
+    def validate_streams_config(cls, v: Optional[list[StreamConfig]], info) -> Optional[list[StreamConfig]]:
+        """Validate streams configuration consistency."""
+        if not v:
+            return v
+
+        # Check for duplicate stream names
+        names = [s.name for s in v]
+        if len(names) != len(set(names)):
+            duplicates = [name for name in names if names.count(name) > 1]
+            raise ValueError(f"Duplicate stream names found in streams configuration: {set(duplicates)}")
+
+        # Check for duplicate table_ids
+        table_ids = [s.destination.table_id for s in v]
+        if len(table_ids) != len(set(table_ids)):
+            duplicates = [tid for tid in table_ids if table_ids.count(tid) > 1]
+            raise ValueError(f"Duplicate table_ids found in streams configuration: {set(duplicates)}")
+
+        # Validate that source sync_mode is 'stream' if streams config is used
+        source_config = info.data.get("source") if info.data else None
+        if source_config and source_config.sync_mode != SourceSyncModes.STREAM:
+            raise ValueError(
+                f"Configuration Error: 'streams' configuration requires source.sync_mode='stream'. "
+                f"Current sync_mode: {source_config.sync_mode}. "
+                f"Please update your config to use:\n"
+                f"  source:\n"
+                f"    sync_mode: stream"
+            )
+
+        return v
+
+    @model_validator(mode="before")
+    @classmethod
+    def inject_config_from_streams(cls, data: Any) -> Any:
+        """Inject source and destination config from streams.
+
+        This runs BEFORE field validation, enriching both source and destination
+        configs from the streams configuration. This allows:
+        1. Sources (like Kafka) to omit topics - they're extracted from streams
+        2. Destinations with unnest=true to work without duplicate record_schemas
+
+        This is source-agnostic: each source type can extract what it needs from streams.
+        """
+        if not isinstance(data, dict):
+            return data
+
+        streams = data.get("streams")
+        if not streams:
+            return data
+
+        source = data.get("source")
+        if source and isinstance(source, dict):
+            source_name = source.get("name")
+
+            # Kafka: inject topics from streams
+            if source_name == "kafka":
+                # Check if topics is missing, None, or empty list
+                if not source.get("topics") or source.get("topics") == []:
+                    topics = []
+                    for stream in streams:
+                        if isinstance(stream, dict):
+                            stream_src = stream.get("source", {})
+                            stream_dest = stream.get("destination", {})
+                            if stream_src.get("topic"):
+                                topics.append(
+                                    {
+                                        "name": stream_src.get("topic"),
+                                        "destination_id": stream_dest.get("table_id", ""),
+                                    }
+                                )
+                    if topics:
+                        source["topics"] = topics
+
+        destination = data.get("destination")
+        if not destination or not isinstance(destination, dict):
+            return data
+
+        destination_config = destination.get("config")
+        if not destination_config or not isinstance(destination_config, dict):
+            return data
+
+        # Only inject if record_schemas is not already set or is empty
+        if not destination_config.get("record_schemas"):
+            # Build record_schemas from streams
+            record_schemas = []
+            for stream in streams:
+                if isinstance(stream, dict):
+                    stream_dest = stream.get("destination", {})
+                    if stream_dest.get("record_schema"):
+                        record_schema_config = {
+                            "destination_id": stream_dest.get("table_id"),
+                            "record_schema": stream_dest.get("record_schema"),
+                            "clustering_keys": stream_dest.get("clustering_keys"),
+                        }
+                        record_schemas.append(record_schema_config)
+
+            # Inject into destination config
+            if record_schemas:
+                destination_config["record_schemas"] = record_schemas
+
+        return data
+
 
 class SyncMetadata(BaseModel):
     """Model which stores general metadata around a sync.
@@ -75,6 +253,7 @@ class SyncMetadata(BaseModel):
     stream_name: str
     sync_mode: SourceSyncModes
     destination_name: str
+    destination_alias: str
 
     @classmethod
    def from_bizon_config(cls, job_id: str, config: BizonConfig) -> "SyncMetadata":
@@ -85,4 +264,5 @@ class SyncMetadata(BaseModel):
             stream_name=config.source.stream,
             sync_mode=config.source.sync_mode,
             destination_name=config.destination.name,
+            destination_alias=config.destination.alias,
         )
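Illustrative sketch (not part of the diff): the shape of a raw config dict before and after BizonConfig's inject_config_from_streams model validator runs. Field names are taken from the models above; the concrete project, topic, and table values are invented.

# Hypothetical input: a Kafka source with no topics, plus a streams block.
raw_config = {
    "source": {"name": "kafka", "sync_mode": "stream", "topics": []},
    "destination": {"name": "bigquery_streaming_v2", "config": {"unnest": True}},
    "streams": [
        {
            "name": "orders",
            "source": {"topic": "shop.orders"},
            "destination": {
                "table_id": "my-project.analytics.orders",
                "record_schema": [{"name": "id", "type": "STRING", "mode": "REQUIRED"}],
                "clustering_keys": None,
            },
        }
    ],
}

# Per the validator above, the source and destination sections are enriched roughly as:
# raw_config["source"]["topics"] == [
#     {"name": "shop.orders", "destination_id": "my-project.analytics.orders"}
# ]
# raw_config["destination"]["config"]["record_schemas"] == [
#     {
#         "destination_id": "my-project.analytics.orders",
#         "record_schema": [{"name": "id", "type": "STRING", "mode": "REQUIRED"}],
#         "clustering_keys": None,
#     }
# ]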
bizon/connectors/destinations/bigquery/src/config.py CHANGED
@@ -98,7 +98,6 @@ class BigQueryRecordSchemaConfig(BaseModel):
 
 
 class BigQueryConfigDetails(AbstractDestinationDetailsConfig):
-
     # Table details
     project_id: str = Field(..., description="BigQuery Project ID")
     dataset_id: str = Field(..., description="BigQuery Dataset ID")
@@ -123,5 +122,6 @@ class BigQueryConfigDetails(AbstractDestinationDetailsConfig):
 
 class BigQueryConfig(AbstractDestinationConfig):
     name: Literal[DestinationTypes.BIGQUERY]
+    alias: str = "bigquery"
     buffer_size: Optional[int] = 400
     config: BigQueryConfigDetails
bizon/connectors/destinations/bigquery/src/destination.py CHANGED
@@ -14,6 +14,7 @@ from loguru import logger
 from bizon.common.models import SyncMetadata
 from bizon.destination.destination import AbstractDestination
 from bizon.engine.backend.backend import AbstractBackend
+from bizon.monitoring.monitor import AbstractMonitor
 from bizon.source.config import SourceSyncModes
 from bizon.source.source import AbstractSourceCallback
 
@@ -21,15 +22,15 @@ from .config import BigQueryColumn, BigQueryConfigDetails
 
 
 class BigQueryDestination(AbstractDestination):
-
     def __init__(
         self,
         sync_metadata: SyncMetadata,
         config: BigQueryConfigDetails,
         backend: AbstractBackend,
         source_callback: AbstractSourceCallback,
+        monitor: AbstractMonitor,
     ):
-        super().__init__(sync_metadata, config, backend, source_callback)
+        super().__init__(sync_metadata, config, backend, source_callback, monitor)
         self.config: BigQueryConfigDetails = config
 
         if config.authentication and config.authentication.service_account_key:
@@ -54,7 +55,6 @@
 
     @property
     def temp_table_id(self) -> str:
-
         if self.sync_metadata.sync_mode == SourceSyncModes.FULL_REFRESH:
             return f"{self.table_id}_temp"
 
@@ -65,7 +65,6 @@
         return f"{self.table_id}"
 
     def get_bigquery_schema(self, df_destination_records: pl.DataFrame) -> List[bigquery.SchemaField]:
-
         # Case we unnest the data
         if self.config.unnest:
             return [
@@ -111,9 +110,7 @@
         # https://cloud.google.com/python/docs/reference/bigquery/latest/google.cloud.bigquery.dbapi.DataError
 
     def convert_and_upload_to_buffer(self, df_destination_records: pl.DataFrame) -> str:
-
         if self.buffer_format == "parquet":
-
             # Upload the Parquet file to GCS
             file_name = f"{self.sync_metadata.source_name}/{self.sync_metadata.stream_name}/{str(uuid4())}.parquet"
 
@@ -151,7 +148,6 @@
         )
 
     def load_to_bigquery(self, gcs_file: str, df_destination_records: pl.DataFrame):
-
         # We always partition by the loaded_at field
         time_partitioning = TimePartitioning(field="_bizon_loaded_at", type_=self.config.time_partitioning)
 
@@ -169,7 +165,6 @@
         assert result.state == "DONE", f"Job failed with state {result.state} with error {result.error_result}"
 
     def write_records(self, df_destination_records: pl.DataFrame) -> Tuple[bool, str]:
-
         # Rename fields to match BigQuery schema
         df_destination_records = df_destination_records.rename(
             {
@@ -199,7 +194,17 @@
     def finalize(self):
         if self.sync_metadata.sync_mode == SourceSyncModes.FULL_REFRESH:
             logger.info(f"Loading temp table {self.temp_table_id} data into {self.table_id} ...")
-            self.bq_client.query(f"CREATE OR REPLACE TABLE {self.table_id} AS SELECT * FROM {self.temp_table_id}")
+            query = f"CREATE OR REPLACE TABLE {self.table_id} AS SELECT * FROM {self.temp_table_id}"
+            result = self.bq_client.query(query)
+            bq_result = result.result()  # Waits for the job to complete
+            logger.info(f"BigQuery CREATE OR REPLACE query result: {bq_result}")
+            # Check if the destination table exists by fetching it; raise if it doesn't exist
+            try:
+                self.bq_client.get_table(self.table_id)
+            except NotFound:
+                logger.error(f"Table {self.table_id} not found")
+                raise Exception(f"Table {self.table_id} not found")
+            # Cleanup
             logger.info(f"Deleting temp table {self.temp_table_id} ...")
             self.bq_client.delete_table(self.temp_table_id, not_found_ok=True)
             return True
bizon/connectors/destinations/bigquery_streaming/config/bigquery_streaming.example.yml ADDED
@@ -0,0 +1,74 @@
+# BigQuery Streaming Destination Configuration
+# Uses the BigQuery Storage Write API for low-latency inserts
+#
+# Use this destination when:
+# - You need near real-time data loading
+# - Low latency is more important than cost optimization
+# - Working with streaming/continuous data sources
+#
+# Requirements:
+# - Service account with bigquery.dataEditor role
+# - Dataset must already exist
+
+name: source_to_bigquery_streaming
+
+source:
+  name: <YOUR_SOURCE>
+  stream: <YOUR_STREAM>
+  authentication:
+    type: api_key
+    params:
+      token: <YOUR_API_KEY>
+
+destination:
+  name: bigquery_streaming
+  config:
+    # GCP Project ID
+    project_id: <YOUR_GCP_PROJECT>
+
+    # BigQuery dataset (must exist)
+    dataset_id: <YOUR_DATASET>
+
+    # Dataset location (US, EU, etc.)
+    dataset_location: US
+
+    # Time partitioning (optional)
+    time_partitioning:
+      type: DAY  # Options: DAY, HOUR, MONTH, YEAR
+      field: _bizon_loaded_at
+
+    # Max rows per streaming request (max 10000)
+    bq_max_rows_per_request: 5000
+
+    # Buffer settings
+    buffer_size: 50  # MB before flushing
+    buffer_flush_timeout: 300  # Seconds before forcing flush
+
+    # Authentication (optional - uses ADC if not provided)
+    # authentication:
+    #   service_account_key: |
+    #     {
+    #       "type": "service_account",
+    #       "project_id": "<YOUR_GCP_PROJECT>",
+    #       ...
+    #     }
+
+    # Schema definition for unnesting (optional)
+    # Required if unnest: true
+    # unnest: true
+    # record_schemas:
+    #   - destination_id: my_table
+    #     record_schema:
+    #       - name: id
+    #         type: STRING
+    #         mode: REQUIRED
+    #       - name: created_at
+    #         type: TIMESTAMP
+    #         mode: NULLABLE
+
+engine:
+  backend:
+    type: bigquery
+    database: <YOUR_GCP_PROJECT>
+    schema: bizon_state
+    syncCursorInDBEvery: 10
bizon/connectors/destinations/bigquery_streaming/src/config.py CHANGED
@@ -41,16 +41,17 @@ class BigQueryStreamingConfigDetails(AbstractDestinationDetailsConfig):
         description="BigQuery Time partitioning type",
     )
     authentication: Optional[BigQueryAuthentication] = None
-    bq_max_rows_per_request: Optional[int] = Field(30000, description="Max rows per buffer streaming request.")
+    bq_max_rows_per_request: Optional[int] = Field(
+        5000,
+        description="Max rows per buffer streaming request. Must not exceed 10000.",
+        le=10000,
+    )
     record_schemas: Optional[list[BigQueryRecordSchemaConfig]] = Field(
         default=None, description="Schema for the records. Required if unnest is set to true."
     )
-    use_legacy_streaming_api: bool = Field(
-        default=False,
-        description="[DEPRECATED] Use the legacy streaming API. This is required for some older BigQuery versions.",
-    )
 
 
 class BigQueryStreamingConfig(AbstractDestinationConfig):
     name: Literal[DestinationTypes.BIGQUERY_STREAMING]
+    alias: str = "bigquery"
     config: BigQueryStreamingConfigDetails
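A minimal standalone sketch (not the package's class) of what the new le=10000 bound on bq_max_rows_per_request enforces, assuming pydantic v2:

from typing import Optional

from pydantic import BaseModel, Field, ValidationError

class StandInConfigDetails(BaseModel):
    # Mirrors the constraint added above: default 5000, hard cap 10000
    bq_max_rows_per_request: Optional[int] = Field(5000, le=10000)

StandInConfigDetails()  # OK, uses the new default of 5000
try:
    StandInConfigDetails(bq_max_rows_per_request=30000)  # the old default, now rejected
except ValidationError as exc:
    print(exc)  # input should be less than or equal to 10000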
bizon/connectors/destinations/bigquery_streaming/src/destination.py CHANGED
@@ -36,15 +36,14 @@ from bizon.connectors.destinations.bigquery.src.config import (
 )
 from bizon.destination.destination import AbstractDestination
 from bizon.engine.backend.backend import AbstractBackend
+from bizon.monitoring.monitor import AbstractMonitor
 from bizon.source.callback import AbstractSourceCallback
 
 from .config import BigQueryStreamingConfigDetails
 
 
 class BigQueryStreamingDestination(AbstractDestination):
-
     # Add constants for limits
-    MAX_ROWS_PER_REQUEST = 5000  # 5000 (max is 10000)
     MAX_REQUEST_SIZE_BYTES = 5 * 1024 * 1024  # 5 MB (max is 10MB)
     MAX_ROW_SIZE_BYTES = 0.9 * 1024 * 1024  # 1 MB
 
@@ -54,8 +53,9 @@
         config: BigQueryStreamingConfigDetails,
         backend: AbstractBackend,
         source_callback: AbstractSourceCallback,
+        monitor: AbstractMonitor,
     ):  # type: ignore
-        super().__init__(sync_metadata, config, backend, source_callback)
+        super().__init__(sync_metadata, config, backend, source_callback, monitor)
         self.config: BigQueryStreamingConfigDetails = config
 
         if config.authentication and config.authentication.service_account_key:
@@ -77,7 +77,6 @@
         return self.destination_id or f"{self.project_id}.{self.dataset_id}.{tabled_id}"
 
     def get_bigquery_schema(self) -> List[bigquery.SchemaField]:
-
         if self.config.unnest:
             if len(list(self.record_schemas.keys())) == 1:
                 self.destination_id = list(self.record_schemas.keys())[0]
@@ -168,7 +167,6 @@
         Safe cast record values to the correct type for BigQuery.
         """
         for col in self.record_schemas[self.destination_id]:
-
             # Handle dicts as strings
             if col.type in [BigQueryColumnType.STRING, BigQueryColumnType.JSON]:
                 if isinstance(row[col.name], dict) or isinstance(row[col.name], list):
@@ -222,7 +220,7 @@
         try:
             # Handle streaming batch
             if batch.get("stream_batch") and len(batch["stream_batch"]) > 0:
-                return self.bq_client.insert_rows_json(
+                self.bq_client.insert_rows_json(
                     table,
                     batch["stream_batch"],
                     row_ids=[None] * len(batch["stream_batch"]),
@@ -245,6 +243,10 @@
                 if load_job.state != "DONE":
                     raise Exception(f"Failed to load rows to BigQuery: {load_job.errors}")
 
+                self.monitor.track_large_records_synced(
+                    num_records=len(batch["json_batch"]), extra_tags={"destination_id": self.destination_id}
+                )
+
         except Exception as e:
             logger.error(f"Error inserting batch: {str(e)}, type: {type(e)}")
             raise
@@ -347,10 +349,12 @@
 
             # If adding this item would exceed either limit, yield current batch and start new one
             if (
-                len(current_batch) >= self.MAX_ROWS_PER_REQUEST
+                len(current_batch) >= self.bq_max_rows_per_request
                 or current_batch_size + item_size > self.MAX_REQUEST_SIZE_BYTES
             ):
-                logger.debug(f"Yielding batch of {len(current_batch)} rows, size: {current_batch_size/1024/1024:.2f}MB")
+                logger.debug(
+                    f"Yielding batch of {len(current_batch)} rows, size: {current_batch_size / 1024 / 1024:.2f}MB"
+                )
                 yield {"stream_batch": current_batch, "json_batch": large_rows}
                 current_batch = []
                 current_batch_size = 0
@@ -366,7 +370,7 @@
         # Yield the last batch
         if current_batch:
             logger.debug(
-                f"Yielding streaming batch of {len(current_batch)} rows, size: {current_batch_size/1024/1024:.2f}MB"
+                f"Yielding streaming batch of {len(current_batch)} rows, size: {current_batch_size / 1024 / 1024:.2f}MB"
             )
             logger.debug(f"Yielding large rows batch of {len(large_rows)} rows")
             yield {"stream_batch": current_batch, "json_batch": large_rows}
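Simplified sketch (not the package's exact code) of the batch-splitting rule shown above: a new batch starts whenever the configurable row cap (bq_max_rows_per_request, replacing the old MAX_ROWS_PER_REQUEST constant) or the fixed request-size cap would be exceeded.

import json

MAX_REQUEST_SIZE_BYTES = 5 * 1024 * 1024  # 5 MB, as in the class constant above
bq_max_rows_per_request = 5000            # new default, bounded at 10000 in config

def split_into_batches(rows):
    """Yield lists of rows that respect both the row-count and byte-size caps."""
    current_batch, current_batch_size = [], 0
    for row in rows:
        item_size = len(json.dumps(row).encode("utf-8"))
        if current_batch and (
            len(current_batch) >= bq_max_rows_per_request
            or current_batch_size + item_size > MAX_REQUEST_SIZE_BYTES
        ):
            yield current_batch
            current_batch, current_batch_size = [], 0
        current_batch.append(row)
        current_batch_size += item_size
    if current_batch:
        yield current_batch

# Example: 12000 small rows are split into chunks of at most 5000
batches = list(split_into_batches([{"id": i} for i in range(12000)]))
print([len(b) for b in batches])  # [5000, 5000, 2000]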
bizon/connectors/destinations/bigquery_streaming_v2/config/bigquery_streaming_v2.example.yml ADDED
@@ -0,0 +1,79 @@
+# BigQuery Streaming V2 Destination Configuration
+# Uses the BigQuery Storage Write API (v2) for improved streaming performance
+#
+# Use this destination when:
+# - You need the latest BigQuery streaming features
+# - Working with high-volume streaming data
+# - Require better error handling and retry logic
+#
+# Differences from v1:
+# - Improved batching and retry logic
+# - Better handling of schema evolution
+# - Enhanced error reporting
+#
+# Requirements:
+# - Service account with bigquery.dataEditor role
+# - Dataset must already exist
+
+name: source_to_bigquery_streaming_v2
+
+source:
+  name: <YOUR_SOURCE>
+  stream: <YOUR_STREAM>
+  authentication:
+    type: api_key
+    params:
+      token: <YOUR_API_KEY>
+
+destination:
+  name: bigquery_streaming_v2
+  config:
+    # GCP Project ID
+    project_id: <YOUR_GCP_PROJECT>
+
+    # BigQuery dataset (must exist)
+    dataset_id: <YOUR_DATASET>
+
+    # Dataset location (US, EU, etc.)
+    dataset_location: US
+
+    # Time partitioning (optional)
+    time_partitioning:
+      type: DAY  # Options: DAY, HOUR, MONTH, YEAR
+      field: _bizon_loaded_at
+
+    # Max rows per streaming request (max 10000)
+    bq_max_rows_per_request: 5000
+
+    # Buffer settings
+    buffer_size: 50  # MB before flushing
+    buffer_flush_timeout: 300  # Seconds before forcing flush
+
+    # Authentication (optional - uses ADC if not provided)
+    # authentication:
+    #   service_account_key: |
+    #     {
+    #       "type": "service_account",
+    #       "project_id": "<YOUR_GCP_PROJECT>",
+    #       ...
+    #     }
+
+    # Schema definition for unnesting (optional)
+    # Required if unnest: true
+    # unnest: true
+    # record_schemas:
+    #   - destination_id: my_table
+    #     record_schema:
+    #       - name: id
+    #         type: STRING
+    #         mode: REQUIRED
+    #       - name: created_at
+    #         type: TIMESTAMP
+    #         mode: NULLABLE
+
+engine:
+  backend:
+    type: bigquery
+    database: <YOUR_GCP_PROJECT>
+    schema: bizon_state
+    syncCursorInDBEvery: 10
bizon/connectors/destinations/bigquery_streaming_v2/src/config.py CHANGED
@@ -41,7 +41,11 @@ class BigQueryStreamingV2ConfigDetails(AbstractDestinationDetailsConfig):
         description="BigQuery Time partitioning type",
     )
     authentication: Optional[BigQueryAuthentication] = None
-    bq_max_rows_per_request: Optional[int] = Field(30000, description="Max rows per buffer streaming request.")
+    bq_max_rows_per_request: Optional[int] = Field(
+        5000,
+        description="Max rows per buffer streaming request. Must not exceed 10000.",
+        le=10000,
+    )
     record_schemas: Optional[list[BigQueryRecordSchemaConfig]] = Field(
         default=None, description="Schema for the records. Required if unnest is set to true."
     )
@@ -49,4 +53,5 @@ class BigQueryStreamingV2ConfigDetails(AbstractDestinationDetailsConfig):
 
 class BigQueryStreamingV2Config(AbstractDestinationConfig):
     name: Literal[DestinationTypes.BIGQUERY_STREAMING_V2]
+    alias: str = "bigquery"
     config: BigQueryStreamingV2ConfigDetails