buildgrid 0.2.52__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
@@ -17,8 +17,7 @@
  SQLIndex
  ==================

- A SQL index implementation. This can be pointed to either a remote SQL server
- or a local SQLite database.
+ A SQL index implementation. This must be pointed to a remote SQL server.

  """

@@ -35,6 +34,8 @@ from sqlalchemy.orm import InstrumentedAttribute, Session, load_only
  from sqlalchemy.orm.exc import StaleDataError
  from sqlalchemy.orm.query import Query
  from sqlalchemy.orm.session import Session as SessionType
+ from sqlalchemy.dialects.postgresql import insert
+ from sqlalchemy.sql.functions import coalesce

  from buildgrid._protos.build.bazel.remote.execution.v2.remote_execution_pb2 import Digest
  from buildgrid._protos.google.rpc import code_pb2
@@ -49,10 +50,8 @@ from buildgrid.server.sql.provider import SqlProvider

  from ..storage_abc import StorageABC
  from .index_abc import IndexABC
- from .sql_dialect_delegates import PostgreSQLDelegate, SQLiteDelegate

  LOGGER = buildgrid_logger(__name__)
- DIALECT_DELEGATES = {"postgresql": PostgreSQLDelegate, "sqlite": SQLiteDelegate}

  INLINE_BLOB_SIZE_HARD_MAXIMUM = 1000000000

@@ -151,9 +150,6 @@ class SQLIndex(IndexABC):
  unknown_args = kwargs_keys - available_options
  raise TypeError(f"Unknown keyword arguments: [{unknown_args}]")

- # Dialect-specific initialization
- self._dialect_delegate = DIALECT_DELEGATES.get(self._sql.dialect)
-
  if inclause_limit > 0:
      if inclause_limit > window_size:
          LOGGER.warning(
@@ -377,14 +373,32 @@ class SQLIndex(IndexABC):

  digest_blob_pairs = sorted(digest_blob_pairs, key=lambda pair: (pair[0].hash, pair[0].size_bytes))

- if self._dialect_delegate:
-     try:
-         self._dialect_delegate._save_digests_to_index(  # type: ignore
-             digest_blob_pairs, session, self._max_inline_blob_size
-         )
-         return
-     except AttributeError:
-         pass
+ # See discussion of __table__ typing in https://github.com/sqlalchemy/sqlalchemy/issues/9130
+ index_table = cast(Table, IndexEntry.__table__)
+ update_time = datetime.utcnow()
+ new_rows = [
+     {
+         "digest_hash": digest.hash,
+         "digest_size_bytes": digest.size_bytes,
+         "accessed_timestamp": update_time,
+         "inline_blob": (blob if digest.size_bytes <= self._max_inline_blob_size else None),
+         "deleted": False,
+     }
+     for (digest, blob) in digest_blob_pairs
+ ]
+
+ base_insert_stmt = insert(index_table).values(new_rows)
+
+ update_stmt = base_insert_stmt.on_conflict_do_update(
+     index_elements=["digest_hash"],
+     set_={
+         "accessed_timestamp": update_time,
+         "inline_blob": coalesce(base_insert_stmt.excluded.inline_blob, index_table.c.inline_blob),
+         "deleted": False,
+     },
+ )
+
+ session.execute(update_stmt)

  update_time = datetime.utcnow()
  # Figure out which digests we can just update
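The dialect-delegate indirection is replaced by this inline PostgreSQL upsert. A minimal standalone sketch of the same `INSERT ... ON CONFLICT DO UPDATE` pattern, using a hypothetical `KV` table in place of BuildGrid's `IndexEntry`:

```python
# Sketch of the ON CONFLICT DO UPDATE pattern above, reduced to a
# hypothetical key/value table. On a duplicate key the timestamp is
# refreshed, and coalesce() keeps an existing blob when the new row has none.
from datetime import datetime

from sqlalchemy import Column, DateTime, LargeBinary, String, create_engine
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import Session, declarative_base

from sqlalchemy.sql.functions import coalesce

Base = declarative_base()


class KV(Base):  # hypothetical stand-in for IndexEntry
    __tablename__ = "kv"
    key = Column(String, primary_key=True)
    blob = Column(LargeBinary, nullable=True)
    accessed = Column(DateTime)


engine = create_engine("postgresql://localhost/example")  # illustrative DSN
with Session(engine) as session:
    stmt = insert(KV).values([{"key": "a", "blob": b"x", "accessed": datetime.utcnow()}])
    stmt = stmt.on_conflict_do_update(
        index_elements=["key"],
        set_={
            "accessed": stmt.excluded.accessed,
            "blob": coalesce(stmt.excluded.blob, KV.__table__.c.blob),
        },
    )
    session.execute(stmt)
    session.commit()
```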
@@ -26,8 +26,7 @@ from io import BytesIO
  from typing import IO, Any, Iterator, Sequence, TypedDict, cast

  from sqlalchemy import CursorResult, delete, func, select
- from sqlalchemy.dialects.postgresql import insert as postgresql_insert
- from sqlalchemy.dialects.sqlite import insert as sqlite_insert
+ from sqlalchemy.dialects.postgresql import insert
  from sqlalchemy.exc import DBAPIError
  from sqlalchemy.orm.exc import StaleDataError

@@ -59,7 +58,7 @@ class SQLStorage(StorageABC):
  self._sql_ro = sql_ro_provider or sql_provider
  self._inclause_limit = self._sql.default_inlimit

- supported_dialects = ["postgresql", "sqlite"]
+ supported_dialects = ["postgresql"]

  if self._sql.dialect not in supported_dialects:
      raise RuntimeError(
@@ -71,14 +70,6 @@ class SQLStorage(StorageABC):
  with self._sql.session() as session:
      session.query(BlobEntry).first()

- def _sqlite_bulk_insert(self, new_rows: list[DigestRow]) -> None:
-     with self._sql.session() as session:
-         session.execute(sqlite_insert(BlobEntry).values(new_rows).on_conflict_do_nothing())
-
- def _postgresql_bulk_insert(self, new_rows: list[DigestRow]) -> None:
-     with self._sql.session() as session:
-         session.execute(postgresql_insert(BlobEntry).values(new_rows).on_conflict_do_nothing())
-
  def _bulk_insert(self, digests: list[tuple[Digest, bytes]]) -> None:
      # Sort digests by hash to ensure consistent order to minimize deadlocks
      # when BatchUpdateBlobs requests have overlapping blobs
@@ -87,12 +78,8 @@ class SQLStorage(StorageABC):
      for (digest, blob) in sorted(digests, key=lambda x: x[0].hash)
  ]

- if self._sql.dialect == "sqlite":
-     self._sqlite_bulk_insert(new_rows)
- elif self._sql.dialect == "postgresql":
-     self._postgresql_bulk_insert(new_rows)
- else:
-     raise RuntimeError(f"Unsupported dialect {self._sql.dialect} for bulk_insert")
+ with self._sql.session() as session:
+     session.execute(insert(BlobEntry).values(new_rows).on_conflict_do_nothing())

  @timed(METRIC.STORAGE.STAT_DURATION, type=TYPE)
  def has_blob(self, digest: Digest) -> bool:
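Both dialect-specific bulk-insert helpers collapse into the PostgreSQL form. A self-contained sketch of the deduplicating insert, with a hypothetical `BlobRow` model standing in for `BlobEntry`:

```python
# Sketch: idempotent bulk insert. Rows whose primary key already exists are
# silently skipped, so overlapping BatchUpdateBlobs requests and retries
# cannot raise IntegrityError. `BlobRow` is a hypothetical stand-in.
from sqlalchemy import Column, LargeBinary, String
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class BlobRow(Base):
    __tablename__ = "blobs"
    digest_hash = Column(String, primary_key=True)
    data = Column(LargeBinary)


def bulk_insert(session: Session, rows: list[dict]) -> None:
    # ON CONFLICT DO NOTHING keeps the statement safe to replay.
    session.execute(insert(BlobRow).values(rows).on_conflict_do_nothing())
```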
@@ -31,7 +31,7 @@ from google.protobuf.internal.containers import RepeatedCompositeFieldContainer
  from google.protobuf.timestamp_pb2 import Timestamp
  from grpc import Channel
  from sqlalchemy import ColumnExpressionArgument, CursorResult, and_, delete, func, insert, or_, select, text, update
- from sqlalchemy.dialects import postgresql, sqlite
+ from sqlalchemy.dialects import postgresql
  from sqlalchemy.exc import IntegrityError
  from sqlalchemy.orm import Session, joinedload
  from sqlalchemy.sql.expression import Insert, Select
@@ -760,11 +760,10 @@ class Scheduler:
  )

  def _notify_job_updated(self, job_names: str | list[str], session: Session) -> None:
-     if self._sql.dialect == "postgresql":
-         if isinstance(job_names, str):
-             job_names = [job_names]
-         for job_name in job_names:
-             session.execute(text(f"NOTIFY {NotificationChannel.JOB_UPDATED.value}, '{job_name}';"))
+     if isinstance(job_names, str):
+         job_names = [job_names]
+     for job_name in job_names:
+         session.execute(text(f"NOTIFY {NotificationChannel.JOB_UPDATED.value}, '{job_name}';"))

  def _get_operation(self, operation_name: str, session: Session) -> OperationEntry | None:
      statement = (
@@ -776,7 +775,7 @@ class Scheduler:
  )

  def _batch_timeout_jobs(self, job_select_stmt: Select[Any], status_code: int, message: str) -> int:
      """Timeout all jobs selected by a query"""
-     with self._sql.session(sqlite_lock_immediately=True, exceptions_to_not_raise_on=[Exception]) as session:
+     with self._sql.session(exceptions_to_not_raise_on=[Exception]) as session:
          # Get the full list of jobs to timeout
          job_entries = session.execute(job_select_stmt).scalars().all()
          jobs = []
@@ -838,7 +837,7 @@ class Scheduler:
      .limit(1)
      .with_for_update(skip_locked=True)
  )
- with self._sql.session(sqlite_lock_immediately=True, exceptions_to_not_raise_on=[Exception]) as session:
+ with self._sql.session(exceptions_to_not_raise_on=[Exception]) as session:
      job = session.execute(stale_job_statement).scalar_one_or_none()
      if not job:
          return False
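The `sqlite_lock_immediately` flag disappears because `with_for_update(skip_locked=True)`, visible in the context lines, already provides the needed row locking on PostgreSQL. A sketch of that claim pattern, using a hypothetical `Job` model:

```python
# Sketch: claim a row with SELECT ... FOR UPDATE SKIP LOCKED. Rows locked by
# another transaction are skipped rather than waited on, so several scheduler
# replicas can reap stale jobs concurrently without blocking each other.
from sqlalchemy import Column, Integer, String, select
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Job(Base):  # hypothetical stand-in for JobEntry
    __tablename__ = "jobs"
    name = Column(String, primary_key=True)
    stage = Column(Integer)


def claim_one(session: Session) -> Job | None:
    stmt = select(Job).where(Job.stage == 1).limit(1).with_for_update(skip_locked=True)
    # The returned row stays locked until the session commits or rolls back.
    return session.execute(stmt).scalar_one_or_none()
```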
@@ -916,22 +915,14 @@ class Scheduler:
  job.cancelled = True

  # If the job was assigned to a bot, we need to update the quota / capacity
- if self._sql.dialect == "postgresql":
-     update_query = (
-         update(BotEntry)
-         .where(BotEntry.bot_id == job.worker_name)
-         .values(capacity=BotEntry.capacity + 1)
-         .returning(BotEntry.cohort)
-     )
-     if cohort := session.execute(update_query).scalar_one_or_none():
-         self._update_instance_quota_usage(session, cohort, job.instance_name, -1, guard=None)
- else:
-     if bot := session.execute(
-         select(BotEntry).where(BotEntry.bot_id == job.worker_name).with_for_update()
-     ).scalar_one_or_none():
-         bot.capacity += 1
-         if bot.cohort:
-             self._update_instance_quota_usage(session, bot.cohort, job.instance_name, -1, guard=None)
+ update_query = (
+     update(BotEntry)
+     .where(BotEntry.bot_id == job.worker_name)
+     .values(capacity=BotEntry.capacity + 1)
+     .returning(BotEntry.cohort)
+ )
+ if cohort := session.execute(update_query).scalar_one_or_none():
+     self._update_instance_quota_usage(session, cohort, job.instance_name, -1, guard=None)

  session.add(
      JobHistoryEntry(
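The surviving PostgreSQL path increments the bot's capacity and reads back its cohort in a single round trip; the removed SQLite branch needed a locked SELECT plus an in-Python increment. A condensed sketch of the `UPDATE ... RETURNING` idiom with a hypothetical `Bot` model:

```python
# Sketch: UPDATE ... RETURNING does the increment and the cohort lookup in
# one statement. `Bot` is a hypothetical stand-in for BotEntry.
from sqlalchemy import Column, Integer, String, update
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Bot(Base):
    __tablename__ = "bots"
    bot_id = Column(String, primary_key=True)
    capacity = Column(Integer)
    cohort = Column(String, nullable=True)


def release_capacity(session: Session, bot_id: str) -> str | None:
    stmt = (
        update(Bot)
        .where(Bot.bot_id == bot_id)
        .values(capacity=Bot.capacity + 1)
        .returning(Bot.cohort)
    )
    # Returns the bot's cohort, or None if no such bot exists.
    return session.execute(stmt).scalar_one_or_none()
```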
@@ -1157,8 +1148,7 @@ class Scheduler:
  self._notify_job_updated(job.name, session)

  LOGGER.debug("Assigned job to bot", tags=log_tags)
- if self._sql.dialect == "postgresql":
-     session.execute(text(f"NOTIFY {NotificationChannel.JOB_ASSIGNED.value}, '{bot.name}';"))
+ session.execute(text(f"NOTIFY {NotificationChannel.JOB_ASSIGNED.value}, '{bot.name}';"))

  def _match_bot_by_sampling(
      self, session: Session, query: Select[tuple[BotEntry]], sampling: SamplingConfig
@@ -1523,7 +1513,7 @@ class Scheduler:
  )

  updated = False
- with self._sql.session(sqlite_lock_immediately=True) as session:
+ with self._sql.session() as session:
      job = session.execute(job_statement).scalar_one_or_none()
      if job is not None:
          self._match_job_to_bot(session, job, failure_backoff, bot_assignment_fn, assigner_name)
@@ -1558,7 +1548,7 @@ class Scheduler:
  )

  updated = False
- with self._sql.session(sqlite_lock_immediately=True) as session:
+ with self._sql.session() as session:
      job = session.execute(job_statement).scalar_one_or_none()
      if job is not None:
          self._match_job_to_bot(session, job, failure_backoff, bot_assignment_fn, assigner_name)
@@ -1702,22 +1692,8 @@ class Scheduler:
  return num_rows_deleted

  def _insert_on_conflict_do_nothing(self, model: type[OrmBase]) -> Insert:
-     # `Insert.on_conflict_do_nothing` is a SQLAlchemy "generative method", it
-     # returns a modified copy of the statement it is called on. For
-     # some reason mypy can't understand this, so the errors are ignored here.
-     if self._sql.dialect == "sqlite":
-         sqlite_insert: sqlite.Insert = sqlite.insert(model)
-         return sqlite_insert.on_conflict_do_nothing()
-
-     elif self._sql.dialect == "postgresql":
-         insertion: postgresql.Insert = postgresql.insert(model)
-         return insertion.on_conflict_do_nothing()
-
-     else:
-         # Fall back to the non-specific insert implementation. This doesn't
-         # support `ON CONFLICT DO NOTHING`, so callers need to be careful to
-         # still catch IntegrityErrors if other database backends are possible.
-         return insert(model)
+     insertion: postgresql.Insert = postgresql.insert(model)
+     return insertion.on_conflict_do_nothing()

  def get_or_create_client_identity_in_store(
      self, session: Session, client_id: ClientIdentityEntry
@@ -1744,7 +1720,7 @@ class Scheduler:
  try:
      session.execute(insertion)

- # Handle unique constraint violation when using an unsupported database (ie. not PostgreSQL or SQLite)
+ # Handle unique constraint violation when using an unsupported database (ie. not PostgreSQL)
  except IntegrityError:
      LOGGER.debug("Handled IntegrityError when inserting client identity.")
1750
1726
 
@@ -1777,7 +1753,7 @@ class Scheduler:
1777
1753
  try:
1778
1754
  session.execute(insertion)
1779
1755
 
1780
- # Handle unique constraint violation when using an unsupported database (ie. not PostgreSQL or SQLite)
1756
+ # Handle unique constraint violation when using an unsupported database (ie. not PostgreSQL)
1781
1757
  except IntegrityError:
1782
1758
  LOGGER.debug("Handled IntegrityError when inserting request metadata.")
1783
1759
 
@@ -1949,7 +1925,9 @@ class Scheduler:
  )

  @timed(METRIC.SCHEDULER.ASSIGNMENT_DURATION)
- def _fetch_job_for_bot(self, session: Session, bot: BotEntry, log_tags: Tags) -> JobEntry | None:
+ def _fetch_job_for_bot(
+     self, session: Session, bot: BotEntry, usage_diffs: InstanceQuotaUsageDiffs, log_tags: Tags
+ ) -> JobEntry | None:
      # Attempt to fetch a new job for a bot to work on.
      # This can help if there are usually more jobs available than bots.

@@ -1959,15 +1937,22 @@ class Scheduler:
  if bot.instance_name != "*":
      job_statement = job_statement.where(self._job_in_instance_pool())

- # Prioritize instance where usage <= max_quota - bot.capacity
- # `- bot.capacity` to avoid over-assigning jobs to bots when nearing quota limits
- instances_query = select(InstanceQuota.instance_name).where(
-     InstanceQuota.bot_cohort == bot.cohort,
-     InstanceQuota.current_usage <= InstanceQuota.max_quota - bot.capacity,
- )
- instances = session.execute(instances_query).scalars().all()
- if instances:
-     job_statement = job_statement.where(JobEntry.instance_name.in_(instances))
+ if bot.cohort:
+     # Prioritize instance where usage <= max_quota - bot.capacity
+     # `- bot.capacity` to avoid over-assigning jobs to bots when nearing quota limits
+     instances_query = select(InstanceQuota.instance_name).where(
+         InstanceQuota.bot_cohort == bot.cohort,
+         InstanceQuota.current_usage <= InstanceQuota.max_quota - bot.capacity,
+     )
+     instances: set[str] = set()
+     instances.update(session.execute(instances_query).scalars().all())
+     # Always allow scheduling more jobs of an instance if we're returning usage
+     instances.update(
+         {instance for (cohort, instance), diff in usage_diffs.items() if cohort == bot.cohort and diff < 0}
+     )
+
+     if instances:
+         job_statement = job_statement.where(JobEntry.instance_name.in_(instances))

  if next_job := session.execute(job_statement).scalar_one_or_none():
      log_tags["db.next_job_name"] = next_job.name
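The new `usage_diffs` parameter lets the fetch keep scheduling for instances that are about to have usage handed back in the same synchronization pass, even if their recorded usage is currently at quota. The set arithmetic in isolation (a pure-Python sketch; names mirror the diff, the function itself is illustrative):

```python
# Sketch: merge instances that are under quota with instances carrying a
# negative pending diff (capacity being returned this pass).
InstanceQuotaUsageDiffs = dict[tuple[str, str], int]  # (cohort, instance) -> delta


def eligible_instances(
    under_quota: list[str], usage_diffs: InstanceQuotaUsageDiffs, cohort: str
) -> set[str]:
    instances: set[str] = set(under_quota)
    instances.update(
        instance for (c, instance), diff in usage_diffs.items() if c == cohort and diff < 0
    )
    return instances


# "ci" is at quota but is returning one job, so it stays schedulable.
assert eligible_instances(["dev"], {("x86", "ci"): -1}, "x86") == {"dev", "ci"}
```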
@@ -2321,7 +2306,7 @@ class Scheduler:

  for _ in range(fetch_limit):
      # Try to fill up the newly free capacity with new jobs.
-     if new_job := self._fetch_job_for_bot(session, bot, log_tags):
+     if new_job := self._fetch_job_for_bot(session, bot, usage_diffs, log_tags):
          if bot.cohort:
              usage_diffs[(bot.cohort, new_job.instance_name)] += 1
          synchronized_leases.append(new_job.to_lease_proto())
@@ -2844,18 +2829,9 @@ class Scheduler:
  if self.bot_locality_hint_limit == 0:
      return

- # Insert new hint with dialect-specific seq handling
- if self._sql.dialect == "postgresql":
-     # For PostgreSQL, use the sequence to get the next seq number
-     next_seq = None
- else:
-     # For SQLite, manually find the max seq number
-     max_seq = session.execute(
-         select(func.coalesce(func.max(BotLocalityHintEntry.sequence_number), 0)).where(
-             BotLocalityHintEntry.bot_name == bot_name
-         )
-     ).scalar_one()
-     next_seq = max_seq + 1
+ # Insert new hint with seq handling
+ # For PostgreSQL, use the sequence to get the next seq number
+ next_seq = None

  new_hint = BotLocalityHintEntry(
      bot_name=bot_name,
@@ -2960,9 +2936,6 @@ class Scheduler:
  # `greatest(0,_)` is needed if this feature is released when there are already running jobs
  # TODO: remove the safe-guard after the next minor version bump
  new_usage: Any = func.greatest(0, InstanceQuota.current_usage + delta)
- if self._sql.dialect == "sqlite":
-     # SQLite does not support `greatest`, so we use a simpler update for it.
-     new_usage = InstanceQuota.current_usage + delta

  update_usage_query = (
      update(InstanceQuota)
@@ -25,7 +25,7 @@ from sqlalchemy import select as sql_select
  from sqlalchemy.orm import Session

  from buildgrid.server.logging import buildgrid_logger
- from buildgrid.server.sql.models import BotEntry, JobEntry
+ from buildgrid.server.sql.models import BotEntry
  from buildgrid.server.sql.provider import SqlProvider
  from buildgrid.server.threading import ContextWorker

@@ -79,10 +79,7 @@ class Notifier(Generic[T]):
  while not shutdown_requested.is_set():
      try:
          with self._sql.session() as session:
-             if self._sql.dialect == "postgresql":
-                 self._listen_for_updates(shutdown_requested, session)
-             else:
-                 self._poll_for_updates(shutdown_requested, session)
+             self._listen_for_updates(shutdown_requested, session)
      except Exception as e:
          LOGGER.warning(
              f"OperationsNotifier encountered exception: {e}.",
@@ -122,23 +119,6 @@ class Notifier(Generic[T]):
  notify = dbapi_connection.notifies.pop()
  self.notify(notify.payload)

- def _poll(self, names: list[str], session: Session) -> dict[str, T]:
-     raise NotImplementedError()
-
- def _poll_for_updates(self, shutdown_requested: Event, session: Session) -> None:
-     prev_data: dict[str, T] = {}
-     while not shutdown_requested.is_set():
-         with self._lock:
-             names = list(self._listeners)
-
-         next_data: dict[str, T] = self._poll(names, session)
-         for name in next_data:
-             if name not in prev_data or prev_data[name] != next_data[name]:
-                 self.notify(name)
-
-         prev_data = next_data
-         shutdown_requested.wait(timeout=self.poll_interval)
-
  def notify(self, listener_name: str) -> None:
      with self._lock:
          if listener_name in self._listeners:
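With polling removed, the notifier relies entirely on PostgreSQL LISTEN/NOTIFY (the tail of `_listen_for_updates` is visible in the context above). Roughly how that mechanism looks at the psycopg2 level, independent of the Notifier plumbing; the DSN and channel name here are made up:

```python
# Sketch: a bare psycopg2 LISTEN loop. "dbname=example" and "job_updated"
# are hypothetical; BuildGrid's Notifier wraps this same mechanism behind
# its SQLAlchemy session.
import select

import psycopg2
import psycopg2.extensions

conn = psycopg2.connect("dbname=example")
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cur:
    cur.execute("LISTEN job_updated;")

while True:
    # Wait until the connection's socket becomes readable, then drain
    # every pending notification.
    if select.select([conn], [], [], 5.0) == ([], [], []):
        continue  # timed out; loop and wait again
    conn.poll()
    while conn.notifies:
        notify = conn.notifies.pop()
        print(notify.channel, notify.payload)
```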
@@ -179,27 +159,11 @@ class OperationsNotifier(Notifier[tuple[bool, int]]):
  """
  super().__init__(sql_provider, NotificationChannel.JOB_UPDATED, "OperationsNotifier", poll_interval)

- def _poll(self, names: list[str], session: Session) -> dict[str, tuple[bool, int]]:
-     # Only query for the minimal amount of data required.
-     # The subscribers can choose how they want to act (e.g. by querying the full job data).
-     statement = sql_select(JobEntry.name, JobEntry.cancelled, JobEntry.stage).where(JobEntry.name.in_(names))
-     next_data: dict[str, tuple[bool, int]] = {}
-     for [name, cancelled, stage] in session.execute(statement).all():
-         next_data[name] = (cancelled, stage)
-     return next_data
-

  class BotNotifier(Notifier[str]):
      def __init__(self, sql_provider: SqlProvider, poll_interval: float = 1.0) -> None:
          super().__init__(sql_provider, NotificationChannel.JOB_ASSIGNED, "BotNotifier", poll_interval)

-     def _poll(self, names: list[str], session: Session) -> dict[str, str]:
-         statement = sql_select(BotEntry.name, BotEntry.lease_id).where(BotEntry.name.in_(names))
-         next_data = {}
-         for name, lease_id in session.execute(statement).all():
-             next_data[name] = lease_id
-         return next_data
-
      def listener_count_for_instance(self, instance_name: str) -> int:
          with self._lock:
              stmt = sql_select(func.count(BotEntry.name)).where(
@@ -119,7 +119,6 @@ def upgrade() -> None:
      ["worker_completed_timestamp"],
      unique=False,
      postgresql_where=sa.text("worker_completed_timestamp IS NOT NULL"),
-     sqlite_where=sa.text("worker_completed_timestamp IS NOT NULL"),
  )
  op.create_index(
      "ix_worker_start_timestamp",
@@ -127,7 +126,6 @@ def upgrade() -> None:
      ["worker_start_timestamp"],
      unique=False,
      postgresql_where=sa.text("worker_start_timestamp IS NOT NULL"),
-     sqlite_where=sa.text("worker_start_timestamp IS NOT NULL"),
  )
  op.create_table(
      "platform_properties",
@@ -220,13 +218,11 @@ def downgrade() -> None:
      "ix_worker_start_timestamp",
      table_name="jobs",
      postgresql_where=sa.text("worker_start_timestamp IS NOT NULL"),
-     sqlite_where=sa.text("worker_start_timestamp IS NOT NULL"),
  )
  op.drop_index(
      "ix_worker_completed_timestamp",
      table_name="jobs",
      postgresql_where=sa.text("worker_completed_timestamp IS NOT NULL"),
-     sqlite_where=sa.text("worker_completed_timestamp IS NOT NULL"),
  )
  op.drop_index(op.f("ix_jobs_worker_name"), table_name="jobs")
  op.drop_index("ix_jobs_stage_property_label", table_name="jobs")
@@ -37,7 +37,8 @@ from buildgrid.server.enums import LeaseState, OperationStage

  bigint = Annotated[int, "bigint"]
  # This gives us something to reference in the type_annotation_map to specify the JSONB variant when
- # using postgresql. When we drop SQLite support this won't be necessary versus just using JSONB in
+ # using postgresql.
+ # TODO now SQLite support has been dropped this won't be necessary versus just using JSONB in
  # the model directly.
  json = Annotated[JSON, "json"]

@@ -133,7 +134,6 @@ class JobEntry(Base):
      "worker_completed_timestamp",
      unique=False,
      postgresql_where=worker_completed_timestamp.isnot(None),
-     sqlite_where=worker_completed_timestamp.isnot(None),
  ),
  Index(
      "ix_jobs_property_label_stage",
@@ -15,11 +15,10 @@

  from contextlib import contextmanager
  from datetime import timedelta
- from tempfile import NamedTemporaryFile
  from threading import Lock
  from typing import Any, Generator, Iterator

- from sqlalchemy import create_engine, event, text
+ from sqlalchemy import create_engine
  from sqlalchemy.engine import Engine
  from sqlalchemy.orm import Session, scoped_session, sessionmaker
  from sqlalchemy.pool import NullPool
@@ -37,9 +36,8 @@ from buildgrid.server.sql.models import Base

  from .utils import (
      SQLPoolDisposeHelper,
-     is_psycopg2_connection_string,
-     is_sqlite_connection_string,
-     is_sqlite_inmemory_connection_string,
+     is_postgresql_connection_string,
+     USE_POSTGRES_MESSAGE,
  )

  LOGGER = buildgrid_logger(__name__)
@@ -47,15 +45,13 @@ LOGGER = buildgrid_logger(__name__)
  # Each dialect has a limit on the number of bind parameters allowed. This
  # matters because it determines how large we can allow our IN clauses to get.
  #
- # SQLite: 1000 https://www.sqlite.org/limits.html#max_variable_number
  # PostgreSQL: 32767 (Int16.MAX_VALUE) https://www.postgresql.org/docs/9.4/protocol-message-formats.html
  #
  # We'll refer to this as the "inlimit" in the code. The inlimits are
  # set to 75% of the bind parameter limit of the implementation.
- DIALECT_INLIMIT_MAP = {"postgresql": 24000, "sqlite": 750}
+ DIALECT_INLIMIT_MAP = {"postgresql": 24000}
  DEFAULT_INLIMIT = 100

-
  # NOTE: Obviously these type annotations are useless, but sadly they're what
  # is in the upstream sqlalchemy2-stubs[0].
  #
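Only PostgreSQL's ceiling of 32767 bind parameters matters now; the inlimit of 24000 (roughly 75% of that) caps how many values one IN clause may carry. A generic sketch of how such a cap is typically applied, not BuildGrid's exact helper:

```python
# Sketch: split a large IN (...) query into parameter chunks. The default of
# 24000 mirrors DIALECT_INLIMIT_MAP above; each chunk becomes one
# `WHERE col IN (...)` statement that stays under the 32767 cap.
from typing import Iterator, Sequence, TypeVar

T = TypeVar("T")


def inlimit_chunks(items: Sequence[T], limit: int = 24000) -> Iterator[Sequence[T]]:
    for start in range(0, len(items), limit):
        yield items[start : start + limit]


assert [list(c) for c in inlimit_chunks(list(range(5)), limit=2)] == [[0, 1], [2, 3], [4]]
```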
@@ -64,24 +60,6 @@ DEFAULT_INLIMIT = 100
  #
  # [0]: https://github.com/sqlalchemy/sqlalchemy2-stubs/blob/main/sqlalchemy-stubs/pool/events.pyi#L9
  # [1]: https://github.com/sqlalchemy/sqlalchemy/blob/main/lib/sqlalchemy/pool/events.py#L96-L100
- def _sqlite_on_connect(conn: Any, record: Any) -> None:
-     """SQLite ``PRAGMA`` statements to execute immediately upon connection.
-
-     These statements configure the behaviour of the database, and are specific
-     to SQLite.
-
-     See https://www.sqlite.org/pragma.html for details.
-
-     Args:
-         conn (DBAPIConnection): The DBAPI connection that was just connected.
-         record (_ConnectionRecord): The connection record which contains the
-             DBAPI connection.
-
-     """
-     # Use journal_mode=WAL to allow read/write concurrency, as well as the
-     # performance improvements it brings.
-     conn.execute("PRAGMA journal_mode=WAL")
-     conn.execute("PRAGMA synchronous=NORMAL")


  class SqlProvider:
@@ -93,17 +71,14 @@ class SqlProvider:

  Args:
      connection_string (str | None): The connection string to use when
-         creating a database connection. If ``None`` then a temporary
-         SQLite database will be created for the lifetime of this
-         object.
+         creating a database connection. Must be a valid postgres database.

      connection_timeout (int): The timeout to use when attempting to
          connect to the database, in seconds. Defaults to 5 seconds if
          unset.

      lock_timeout (int): The timeout to use when the connection
-         holds a lock in the database. This is supported only if the database
-         backend is PostgresQL.
+         holds a lock in the database.

      connect_args (dict[str, Any] | None): Dictionary of DBAPI
          connection arguments to pass to the engine. See the
@@ -132,7 +107,7 @@ class SqlProvider:
      publishing.

  Raises:
-     ValueError: when ``connection_string`` specifies an in-memory SQLite
+     ValueError: when ``connection_string`` doesn't specify a Postgresql
          database.

  .. _docs: https://docs.sqlalchemy.org/en/14/core/engines.html#use-the-connect-args-dictionary-parameter
@@ -155,16 +130,10 @@ class SqlProvider:
  ):
      """Initialize an SqlProvider."""
      self._database_tempfile = None
-     # If we don't have a connection string, we'll make a tempfile to use
-     # as an SQLite database. This tempfile needs to exist for the lifetime
-     # of the SqlProvider.
+     # If we don't have a connection string, we'll throw a ValueError and some info about setting up a
+     # postgres database.
      if not connection_string:
-         self._database_tempfile = NamedTemporaryFile(prefix="bgd-", suffix=".db")
-         LOGGER.warning(
-             "No connection string specified for the SQL provider, will use SQLite with tempfile.",
-             tags=dict(tempfile=self._database_tempfile.name),
-         )
-         connection_string = f"sqlite:///{self._database_tempfile.name}"
+         raise ValueError(f"No connection string specified for the SQL provider\n\n{USE_POSTGRES_MESSAGE}")

      # Set up database connection
      self._session_factory = sessionmaker(future=True)
@@ -224,8 +193,7 @@ class SqlProvider:
      is applied.

  lock_timeout (int): The timeout to use when the connection
-     holds a lock in the database. This is supported only if the database
-     backend is PostgresQL.
+     holds a lock in the database.

  connect_args: Dictionary of DBAPI
      connection arguments to pass to the engine. See the
@@ -257,35 +225,24 @@ class SqlProvider:
      database defined by ``connection_string``.

  Raises:
-     ValueError: when attempting to connect to an in-memory SQLite
+     ValueError: when attempting to connect to a non Postgresql
          database.

  .. _docs: https://docs.sqlalchemy.org/en/14/core/engines.html#use-the-connect-args-dictionary-parameter

  """
+ # Disallow sqlite for the scheduler db
+ # theres no reason to support a non production ready scheduler implementation
+
  # Disallow sqlite in-memory because multi-threaded access to it is
  # complex and potentially problematic at best
  # ref: https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#threading-pooling-behavior
- if is_sqlite_inmemory_connection_string(connection_string):
-     raise ValueError(
-         "Cannot use SQLite in-memory with BuildGrid "
-         f"(connection_string=[{connection_string}]). Use a file or "
-         "leave the connection_string empty for a tempfile."
-     )

- # Deprecate sqlite for the scheduler db
- if is_sqlite_connection_string(connection_string):
-     LOGGER.warning(
-         "\n\n"
-         "Using deprecated dialect SQLite.\n"
-         "\n"
-         "Deployments, CI pipelines and developer setups will need to switch any SQLite storage"
-         "(cas storage, cas index, job scheduler) over to Postgresql before updating.\n"
-         "\n"
-         "For non production use a preconfigured docker buildgrid database is available from:"
-         "registry.gitlab.com/buildgrid/buildgrid.hub.docker.com/buildgrid-postgres:nightly"
-         "an example compose file for the database can be found at "
-         "https://gitlab.com/BuildGrid/buildgrid.hub.docker.com/-/blob/master/Composefile.buildbox.yml?ref_type=heads\n"  # noqa: E501
+ # Ensure only postgres is supported
+
+ if not is_postgresql_connection_string(connection_string):
+     raise ValueError(
+         f"Cannot use database (connection_string=[{connection_string}]).\n\n{USE_POSTGRES_MESSAGE}"
      )

  extra_engine_args: dict[str, Any] = {}
@@ -295,12 +252,9 @@ class SqlProvider:
      extra_engine_args["connect_args"] = {}

  if connection_timeout > 0:
-     if is_sqlite_connection_string(connection_string):
-         extra_engine_args["connect_args"]["timeout"] = connection_timeout
-     elif is_psycopg2_connection_string(connection_string):
-         extra_engine_args["connect_args"]["connect_timeout"] = connection_timeout
- if lock_timeout > 0 and is_psycopg2_connection_string(connection_string):
-     # Additional postgres specific timeouts
+     extra_engine_args["connect_args"]["connect_timeout"] = connection_timeout
+ if lock_timeout > 0:
+     # Additional timeouts
      # Additional libpg options
      # Note that those timeouts are in milliseconds (so *1000)
      # User might specifically set options... do not override in this case.
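With only psycopg2 connections possible, the timeout plumbing reduces to two connect_args. A sketch of what the engine configuration converges on; the DSN and timeout values are illustrative, and `lock_timeout` is passed through libpq's `options` string in milliseconds (the `*1000` the comments mention):

```python
# Sketch: PostgreSQL-only engine setup. connect_timeout is in seconds;
# libpq's lock_timeout is in milliseconds, hence the *1000.
from sqlalchemy import create_engine

connection_timeout = 5  # seconds
lock_timeout = 30  # seconds

connect_args = {
    "connect_timeout": connection_timeout,
    # Only set if the user hasn't supplied their own `options` string.
    "options": f"-c lock_timeout={lock_timeout * 1000}",
}
engine = create_engine("postgresql://localhost/buildgrid", connect_args=connect_args)
```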
@@ -326,10 +280,6 @@ class SqlProvider:
  engine = create_engine(connection_string, echo=False, future=True, **extra_engine_args)
  self._session_factory.configure(bind=engine)

- # Register sqlite-specific connection callback.
- if engine.dialect.name == "sqlite":
-     event.listen(engine, "connect", _sqlite_on_connect)
-
  return engine

  @property
@@ -347,7 +297,6 @@ class SqlProvider:
  self,
  *,
  scoped: bool = False,
- sqlite_lock_immediately: bool = False,
  exceptions_to_not_raise_on: list[type[Exception]] | None = None,
  exceptions_to_not_rollback_on: list[type[Exception]] | None = None,
  expire_on_commit: bool = True,
@@ -362,10 +311,6 @@ class SqlProvider:
  if the underlying connection pool has recently been disposed of and
  refreshed due to connectivity issues.

- When ``sqlite_lock_immediately`` is ``True``, the Session will not
- yield until the database has been locked by entering into a write
- transaction when using SQLite.
-
  If an Exception is raised whilst in the managed context, the ongoing
  database transaction is rolled back, and the Exception is reraised.
  Some Exceptions which suggest a transient connection issue with the
@@ -386,13 +331,6 @@ class SqlProvider:
      session. This results in reuse of the underlying Session object
      in a given thread.

- sqlite_lock_immediately: If true, execute a ``BEGIN IMMEDIATE``
-     statement as soon as the session is created when using SQLite.
-     This allows locking for the lifetime of the ``Session`` within
-     this ContextManager, enabling similar behaviour to
-     ``SELECT ... FOR UPDATE`` in other dialects. Defaults to
-     ``False``.
-
  exceptions_to_not_raise_on: The list of error types to be suppressed
      within the context rather than re-raised. Defaults to ``None``,
      meaning all SQLAlchemyErrors will be re-raised.
@@ -442,8 +380,6 @@ class SqlProvider:
  # Try to obtain a session
  try:
      session = factory() if scoped else factory(expire_on_commit=expire_on_commit)
-     if sqlite_lock_immediately and session.bind.name == "sqlite":  # type: ignore
-         session.execute(text("BEGIN IMMEDIATE"))
  except Exception as e:
      LOGGER.error("Unable to obtain a database session.", exc_info=True)
      raise DatabaseError("Unable to obtain a database session.") from e
@@ -489,7 +425,6 @@ class SqlProvider:
  def scoped_session(
      self,
      *,
-     sqlite_lock_immediately: bool = False,
      exceptions_to_not_raise_on: list[type[Exception]] | None = None,
      exceptions_to_not_rollback_on: list[type[Exception]] | None = None,
  ) -> Generator[Session, None, None]:
@@ -512,7 +447,6 @@ class SqlProvider:
  """
  with self.session(
      scoped=True,
-     sqlite_lock_immediately=sqlite_lock_immediately,
      exceptions_to_not_raise_on=exceptions_to_not_raise_on,
      exceptions_to_not_rollback_on=exceptions_to_not_rollback_on,
  ) as session:
@@ -89,14 +89,16 @@ LIST_OPERATIONS_SORT_KEYS = {
      "command": SortKeySpec("command", JobEntry.__tablename__),
  }

-
- def is_sqlite_connection_string(connection_string: str) -> bool:
-     if connection_string:
-         return connection_string.startswith("sqlite")
-     return False
+ USE_POSTGRES_MESSAGE = (
+     "For production use setup a postgresql database.\n"
+     "For CI and local development use the preconfigured docker buildgrid database from:\n"
+     "registry.gitlab.com/buildgrid/buildgrid.hub.docker.com/buildgrid-postgres:nightly\n"
+     "an example compose file for the database can be found at \n"
+     "https://gitlab.com/BuildGrid/buildgrid.hub.docker.com/-/blob/master/Composefile.buildbox.yml?ref_type=heads\n"  # noqa: E501
+ )


- def is_psycopg2_connection_string(connection_string: str) -> bool:
+ def is_postgresql_connection_string(connection_string: str) -> bool:
      if connection_string:
          if connection_string.startswith("postgresql:"):
              return True
@@ -105,35 +107,6 @@ def is_psycopg2_connection_string(connection_string: str) -> bool:
      return False


- def is_sqlite_inmemory_connection_string(full_connection_string: str) -> bool:
-     if is_sqlite_connection_string(full_connection_string):
-         # Valid connection_strings for in-memory SQLite which we don't support could look like:
-         # "sqlite:///file:memdb1?option=value&cache=shared&mode=memory",
-         # "sqlite:///file:memdb1?mode=memory&cache=shared",
-         # "sqlite:///file:memdb1?cache=shared&mode=memory",
-         # "sqlite:///file::memory:?cache=shared",
-         # "sqlite:///file::memory:",
-         # "sqlite:///:memory:",
-         # "sqlite:///",
-         # "sqlite://"
-         # ref: https://www.sqlite.org/inmemorydb.html
-         # Note that a user can also specify drivers, so prefix could become 'sqlite+driver:///'
-         connection_string = full_connection_string
-
-         uri_split_index = connection_string.find("?")
-         if uri_split_index != -1:
-             connection_string = connection_string[0:uri_split_index]
-
-         if connection_string.endswith((":memory:", ":///", "://")):
-             return True
-         elif uri_split_index != -1:
-             opts = full_connection_string[uri_split_index + 1 :].split("&")
-             if "mode=memory" in opts:
-                 return True
-
-     return False
-
-
  class SQLPoolDisposeHelper:
      """Helper class for disposing of SQL session connections"""

@@ -13,4 +13,4 @@
  # limitations under the License.


- __version__ = "0.2.52"
+ __version__ = "0.3.0"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: buildgrid
- Version: 0.2.52
+ Version: 0.3.0
  Summary: A remote execution service
  License: Apache License, Version 2.0
  Project-URL: Homepage, https://buildgrid.build
@@ -42,7 +42,7 @@ Requires-Dist: fakeredis>=2.10.1; extra == "redis"
  Requires-Dist: redis>=4.5.1; extra == "redis"
  Requires-Dist: hiredis; extra == "redis"
  Provides-Extra: docs
- Requires-Dist: Sphinx; extra == "docs"
+ Requires-Dist: Sphinx<=8; extra == "docs"
  Requires-Dist: sphinx-click; extra == "docs"
  Requires-Dist: sphinx-rtd-theme; extra == "docs"
  Requires-Dist: sphinxcontrib-apidoc; extra == "docs"
@@ -77,6 +77,7 @@ Requires-Dist: pytest-xdist; extra == "dev"
  Requires-Dist: memray; extra == "dev"
  Requires-Dist: ruff; extra == "dev"
  Requires-Dist: grpcio-tools; extra == "dev"
+ Requires-Dist: bump4version; extra == "dev"
  Provides-Extra: mypy
  Requires-Dist: mypy; extra == "mypy"
  Requires-Dist: SQLAlchemy[mypy]; extra == "mypy"
@@ -208,7 +208,7 @@ buildgrid/server/servicer.py,sha256=oqU9MaSxxHTDmSxobFTo9YmJctaUCklE2Dj-vfYWKkc,
  buildgrid/server/settings.py,sha256=Iy4eS9Putr5GroIVqYNeTcRY7gbvq29wgQoMDQgPJtA,5604
  buildgrid/server/threading.py,sha256=4QKQYev2KoO2Q-S_OyaoR9qpWyDTVzGMWVe9o2a1yIU,4743
  buildgrid/server/types.py,sha256=xG3bx64pbWMuEwXLuI0o8c2unt2rU2C4zsmUfmMT12c,1323
- buildgrid/server/version.py,sha256=n2RBFSFcPdtiRMdJG5gZUeScD96hl-zdswg8rwRhQmU,604
+ buildgrid/server/version.py,sha256=YJSMuhVWkHaaftw-EYGH7w-GAiZQGnQrRBy8LqrQgWw,603
  buildgrid/server/actioncache/__init__.py,sha256=g9lb8Sn7NY5KOjkMr9GQoJovCVDEg_Fxz_EhdDbhP1I,579
  buildgrid/server/actioncache/instance.py,sha256=UCR7ZGkv4fJOXjeIILMAdTSFWcGgBSYlBg8fMaPJpaI,3139
  buildgrid/server/actioncache/service.py,sha256=WcikJAzFYOYX-tgiOfGGcOnPoubrCd4yP-EhKCHEW0c,2021
@@ -268,16 +268,13 @@ buildgrid/server/cas/storage/replicated.py,sha256=DF_oku2QJAigiRTz6358ZBy4LzQiIH
  buildgrid/server/cas/storage/s3.py,sha256=fHsbNBYBN6x2DnlG22_UA4GMjcqGEMlOU6yIHueE5mc,20058
  buildgrid/server/cas/storage/sharded.py,sha256=WYehvpn1AD-pvGsZDjzIZQRLjCyw7eEEjHymYoMzg2Q,7076
  buildgrid/server/cas/storage/size_differentiated.py,sha256=puT7xMhT_0T1hKGJf_kjjbCcYsmfhQnsEvsRClmk59Y,8223
- buildgrid/server/cas/storage/sql.py,sha256=T4huxHnZMxR5JnalonzSI45H_tWgICxF9dl3LOjWpYo,10274
+ buildgrid/server/cas/storage/sql.py,sha256=ERiHfqF9DA1Tu75QxHP2okAtQB3oA-DCFA95D6KQ6Js,9589
  buildgrid/server/cas/storage/storage_abc.py,sha256=BTLNiAr31amzz22mcLy8ctM9lCcNypL79iD3hPmIUMI,7728
  buildgrid/server/cas/storage/with_cache.py,sha256=IB-pq5S6R6V037RQiS4jGk9Jm5Wj_Qdy7WChs0xiDqI,8132
  buildgrid/server/cas/storage/index/__init__.py,sha256=adgShFqjP778F2KJoM-z7stkT9V_3BPuc3uodlf1buw,579
  buildgrid/server/cas/storage/index/index_abc.py,sha256=JaFHjnePcazCWqCwsrlBYybArc4d4KBM6Utv4xyK7gI,3189
  buildgrid/server/cas/storage/index/redis.py,sha256=nJqZ9HEk65zxNnrD-7sQ6aS5PGy5l55p6-Rs-oSRSZY,16556
- buildgrid/server/cas/storage/index/sql.py,sha256=EAYZ9rhbaLbPDtu7m0At5wmxn5-If99sBY16fA3KSZI,40485
- buildgrid/server/cas/storage/index/sql_dialect_delegates/__init__.py,sha256=2J1pwjLJgO2wScTgQ5p7KaqGxdfKZXf4ouu6oKn5SVM,146
- buildgrid/server/cas/storage/index/sql_dialect_delegates/postgresqldelegate.py,sha256=tIXpp2wYgc3Hd5oXdZEAuhGLHrbDUe4EmKjgZPkik7Y,2316
- buildgrid/server/cas/storage/index/sql_dialect_delegates/sqlitedelegate.py,sha256=Pq7QAkT0GeHrTyDnUFd-9nJ6moFqynAEHDiGBBjjKcc,2300
+ buildgrid/server/cas/storage/index/sql.py,sha256=b01u2DdalQqMWMf9_SCE6l7i2Vp4_61pmeul7Qk8bWU,41016
  buildgrid/server/cleanup/__init__.py,sha256=TTer4pMMV4HOzR6rYI9yPzDlMXEWnY_SXczlHiFZCng,579
  buildgrid/server/cleanup/cleanup.py,sha256=Yyhc9LZpvIZnQNVn-mgnMfdPBrd1x9KMsqrHwmRhzf8,13606
  buildgrid/server/cleanup/janitor/__init__.py,sha256=XaoGqSD-oQwbfSl8OcR0U4pScRcBueB4sU1tpcZNGtk,579
@@ -334,18 +331,18 @@ buildgrid/server/scheduler/__init__.py,sha256=arCg8LWFATeX1tj-s0keVYP8p3wwrrUlCV
  buildgrid/server/scheduler/assigner.py,sha256=wHPAhyiQxYABZJXaUc2g5yFzM78Z0U5nvGV3X9h5pCM,10512
  buildgrid/server/scheduler/cohorts.py,sha256=L_5YZRiVOwPPGStfqnnQXknO5Ja-SC0vq0xjw4XgP-I,1426
  buildgrid/server/scheduler/events.py,sha256=cM7Z7Htr2pYKhltJxfg1YRo0q524yZaGm8yXvRehivk,1453
- buildgrid/server/scheduler/impl.py,sha256=q8U92yEbdFqzOYNrIGn9d4J8bCBCCIGPy4ZGg5YpOvM,137141
- buildgrid/server/scheduler/notifier.py,sha256=uypoIXZowpAIsDQ728VCuFJ4MN2zJld1npnanCWHTrw,8854
+ buildgrid/server/scheduler/impl.py,sha256=jP0cpdmnVxroJ0mLbuNDN2nfKtpn8V3bim9FQOno25I,135536
+ buildgrid/server/scheduler/notifier.py,sha256=22ZsKwyf2oQirAjrwROkvgvr4C_TMUNyhOmtro4uM4I,7121
  buildgrid/server/scheduler/properties.py,sha256=2GydX8KUy9MFv1_JznIkGfWE_wOS0m_XapSv6Gp4pCM,11260
  buildgrid/server/sql/__init__.py,sha256=zbeeRP9BEeDzR-Mx2Ip6SUr49J8eeXsuREgljJTrHkk,579
- buildgrid/server/sql/models.py,sha256=Pd43qOhmtDDSV0AaHiZDeCPP09W4osiLMBLW3v_wU8w,14424
- buildgrid/server/sql/provider.py,sha256=xRF5_GDkz1u1VawcNzySLEqTpIV6FSzRsZIo8iDYEzM,22544
- buildgrid/server/sql/utils.py,sha256=yXG6wWIqKk2AP5nRpZ6AmuH8vuSQy-Zk3L_NeoRNKCg,17414
+ buildgrid/server/sql/models.py,sha256=jAuNLLvdlBWf7uZWlPLMSC1dTWHYlqlk9Pn5A04rQcg,14374
+ buildgrid/server/sql/provider.py,sha256=CXOpjcUY-InwmGnZTpCd4_ziOdROiDY34_SRpqsBwwk,18965
+ buildgrid/server/sql/utils.py,sha256=j76Z_qtGawz6O7vO4-zgCzBV4ylhKszcd_6iY0gV4W4,16470
  buildgrid/server/sql/alembic/README,sha256=MVlc9TYmr57RbhXET6QxgyCcwWP7w-vLkEsirENqiIQ,38
  buildgrid/server/sql/alembic/env.py,sha256=vRRLEpyPS_a-jeKKYvORE0NW2goBnjN7v62n7ix_t28,4534
  buildgrid/server/sql/alembic/script.py.mako,sha256=z6re8oZ_Qk3kZBPMRWWl0b0_sb29J9V7ruHwBlODDHw,1090
  buildgrid/server/sql/alembic/versions/0596ea8f5c61_add_bot_locality_hints_table_and_.py,sha256=lFYk8PPZs2TNBDKbDlYKcDlxqt4CQUO2Jts_sPymeao,2909
- buildgrid/server/sql/alembic/versions/0c17a7cb2bc5_initial_database_state.py,sha256=l2F2549VRygDOi_xuZ4Zal0S411JToWYlSy_L3eju-s,11713
+ buildgrid/server/sql/alembic/versions/0c17a7cb2bc5_initial_database_state.py,sha256=UlIiOq9d0D5pTXsdIr8k454yYvCv14uQ2XCYFzb2V3E,11433
  buildgrid/server/sql/alembic/versions/12992085e81a_add_a_job_index_on_worker_name_and_.py,sha256=Dg5kxHvR4REXmhzpd72EXriMZRCFzO9XRTPRKmWcluY,1681
  buildgrid/server/sql/alembic/versions/1f959c3834d3_drop_the_leases_table.py,sha256=V0X1XVVB_iq3nrwnVWDM7rhVrFDG3T8byZChsNwjcFA,1645
  buildgrid/server/sql/alembic/versions/22cc661efef9_add_instance_quotas_table.py,sha256=zBf23GKRMJq5scFjWC83oMHvKy7K55zOTyWYrbTuJtI,1702
@@ -368,9 +365,9 @@ buildgrid/server/utils/async_lru_cache.py,sha256=iLKeRPoZtZb1wC5AtcyQm8Wt0Bx-KZm
  buildgrid/server/utils/bots.py,sha256=c8hn7tbCecru-m2wicRmtKU5v5rSZPGlk97Yc6eUHgQ,1729
  buildgrid/server/utils/cancellation.py,sha256=pNETzKNoXg0AsXOXKCcLWlFl7SVKdkKinlqWl7MesRA,1703
  buildgrid/server/utils/digests.py,sha256=YNrWeHdbNp7OVTcsInjs30C33z_t9GQ_noMd14bpqPQ,2424
- buildgrid-0.2.52.dist-info/licenses/LICENSE,sha256=swa3Vs7GgALaG9p-e05M-WLkhd_U9QknacNkyVZ85xA,11338
- buildgrid-0.2.52.dist-info/METADATA,sha256=w-UPonoGIo3xi-h7rqGWp-4focQ-2iax4Q6H18iXIEs,7040
- buildgrid-0.2.52.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- buildgrid-0.2.52.dist-info/entry_points.txt,sha256=uyFAXiR9d6EDfSA5vWT8xskz6xalt4PdTuRruT6Q8rk,49
- buildgrid-0.2.52.dist-info/top_level.txt,sha256=T6TYhI_k6NTm2871tIxGCyBIqzlKxylgF9KDLU0Hi7o,10
- buildgrid-0.2.52.dist-info/RECORD,,
+ buildgrid-0.3.0.dist-info/licenses/LICENSE,sha256=swa3Vs7GgALaG9p-e05M-WLkhd_U9QknacNkyVZ85xA,11338
+ buildgrid-0.3.0.dist-info/METADATA,sha256=dZxwUseVWfUjfk3VSlPCxXeMmdLw58lpzN7PdQ0ivyA,7086
+ buildgrid-0.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ buildgrid-0.3.0.dist-info/entry_points.txt,sha256=uyFAXiR9d6EDfSA5vWT8xskz6xalt4PdTuRruT6Q8rk,49
+ buildgrid-0.3.0.dist-info/top_level.txt,sha256=T6TYhI_k6NTm2871tIxGCyBIqzlKxylgF9KDLU0Hi7o,10
+ buildgrid-0.3.0.dist-info/RECORD,,
@@ -1,4 +0,0 @@
- from .postgresqldelegate import PostgreSQLDelegate
- from .sqlitedelegate import SQLiteDelegate
-
- __all__ = ["PostgreSQLDelegate", "SQLiteDelegate"]
@@ -1,65 +0,0 @@
- # Copyright (C) 2020 Bloomberg LP
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # <http://www.apache.org/licenses/LICENSE-2.0>
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """
- PostgreSQLDelegate
- ==================
-
- Extra functionality for the SQL index when using a PostgreSQL backend.
-
- """
-
- from datetime import datetime
- from typing import cast
-
- from sqlalchemy import Table
- from sqlalchemy.dialects.postgresql import insert
- from sqlalchemy.orm.session import Session as SessionType
- from sqlalchemy.sql.functions import coalesce
-
- from buildgrid._protos.build.bazel.remote.execution.v2.remote_execution_pb2 import Digest
- from buildgrid.server.sql.models import IndexEntry
-
-
- class PostgreSQLDelegate:
-     @staticmethod
-     def _save_digests_to_index(
-         digest_blob_pairs: list[tuple[Digest, bytes | None]], session: SessionType, max_inline_blob_size: int
-     ) -> None:
-         # See discussion of __table__ typing in https://github.com/sqlalchemy/sqlalchemy/issues/9130
-         index_table = cast(Table, IndexEntry.__table__)
-         update_time = datetime.utcnow()
-         new_rows = [
-             {
-                 "digest_hash": digest.hash,
-                 "digest_size_bytes": digest.size_bytes,
-                 "accessed_timestamp": update_time,
-                 "inline_blob": (blob if digest.size_bytes <= max_inline_blob_size else None),
-                 "deleted": False,
-             }
-             for (digest, blob) in digest_blob_pairs
-         ]
-
-         base_insert_stmt = insert(index_table).values(new_rows)
-
-         update_stmt = base_insert_stmt.on_conflict_do_update(
-             index_elements=["digest_hash"],
-             set_={
-                 "accessed_timestamp": update_time,
-                 "inline_blob": coalesce(base_insert_stmt.excluded.inline_blob, index_table.c.inline_blob),
-                 "deleted": False,
-             },
-         )
-
-         session.execute(update_stmt)
@@ -1,65 +0,0 @@
- # Copyright (C) 2020 Bloomberg LP
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # <http://www.apache.org/licenses/LICENSE-2.0>
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """
- SQLiteDelegate
- ==================
-
- Extra functionality for the SQL index when using a SQLite backend.
-
- """
-
- from datetime import datetime
- from typing import cast
-
- from sqlalchemy import Table
- from sqlalchemy.dialects.sqlite import insert
- from sqlalchemy.orm.session import Session as SessionType
- from sqlalchemy.sql.functions import coalesce
-
- from buildgrid._protos.build.bazel.remote.execution.v2.remote_execution_pb2 import Digest
- from buildgrid.server.sql.models import IndexEntry
-
-
- class SQLiteDelegate:
-     @staticmethod
-     def _save_digests_to_index(
-         digest_blob_pairs: list[tuple[Digest, bytes | None]], session: SessionType, max_inline_blob_size: int
-     ) -> None:
-         # See discussion of __table__ typing in https://github.com/sqlalchemy/sqlalchemy/issues/9130
-         index_table = cast(Table, IndexEntry.__table__)
-         update_time = datetime.utcnow()
-         new_rows = [
-             {
-                 "digest_hash": digest.hash,
-                 "digest_size_bytes": digest.size_bytes,
-                 "accessed_timestamp": update_time,
-                 "inline_blob": (blob if digest.size_bytes <= max_inline_blob_size else None),
-                 "deleted": False,
-             }
-             for (digest, blob) in digest_blob_pairs
-         ]
-
-         base_insert_stmt = insert(index_table).values(new_rows)
-
-         update_stmt = base_insert_stmt.on_conflict_do_update(
-             index_elements=["digest_hash"],
-             set_={
-                 "accessed_timestamp": update_time,
-                 "inline_blob": coalesce(base_insert_stmt.excluded.inline_blob, index_table.c.inline_blob),
-                 "deleted": False,
-             },
-         )
-
-         session.execute(update_stmt)