buildgrid 0.2.53__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1025,7 +1025,7 @@ def load_sql_scheduler(
     sql_ro = sql_ro or sql
     sql_notifier = sql_notifier or sql
 
-    logstream_url, logstream_credentials, logstream_instance = get_logstream_connection_info(logstream)
+    logstream_url, logstream_credentials = get_logstream_connection_info(logstream)
     logstream_channel: grpc.Channel | None = None
     if logstream_url is not None:
         logstream_credentials = logstream_credentials or {}
@@ -1081,7 +1081,6 @@ def load_sql_scheduler(
         ),
         bot_session_keepalive_timeout=bot_session_keepalive_timeout,
         logstream_channel=logstream_channel,
-        logstream_instance=logstream_instance,
         asset_client=asset_client,
         queued_action_retention_hours=queued_action_retention_hours,
         completed_action_retention_hours=completed_action_retention_hours,
@@ -2150,7 +2149,6 @@ def load_metering_service_client(
 def load_asset_client(
     url: str,
     credentials: ClientCredentials | None = None,
-    instance_name: str = "",
     request_timeout: float = 5.0,
     retries: int = 3,
 ) -> AssetClient:
@@ -2162,7 +2160,6 @@ def load_asset_client(
 
     - !asset-client
       url: https://remote-asset.com
-      instance-name: dev
       credentials:
         tls-client-cert: /path/to/cert
         auth-token: /path/to/token
@@ -2179,7 +2176,7 @@ def load_asset_client(
         server_cert=credentials.get("tls-server-cert"),
         timeout=request_timeout,
     )
-    return AssetClient(channel=channel, instance_name=instance_name, retries=retries)
+    return AssetClient(channel=channel, retries=retries)
 
 
 @object_tag("!introspection")
@@ -2378,18 +2375,16 @@ def _validate_server_credentials(credentials: dict[str, str] | None) -> None:
         sys.exit(-1)
 
 
-def get_logstream_connection_info(logstream: Any) -> tuple[str | None, dict[str, str] | None, str | None]:
+def get_logstream_connection_info(logstream: Any) -> tuple[str | None, dict[str, str] | None]:
    logstream_url = None
    credentials = None
-    logstream_instance_name = None
    if logstream:
        logstream_url = logstream["url"]
        credentials = logstream.get("credentials")
        if not _validate_url_and_credentials(logstream_url, credentials=credentials):
            sys.exit(-1)
-        logstream_instance_name = logstream.get("instance-name", "")
 
-    return logstream_url, credentials, logstream_instance_name
+    return logstream_url, credentials
 
 
 def get_schema(strict: bool = False) -> Any:
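The logstream `instance-name` key is gone from both the loader and the schema: the instance is now derived per job rather than fixed at load time. A minimal sketch of the call-site impact (the config dict mirrors the `logstream-connection-options` schema further down; the import path is assumed from the wheel's RECORD, and the URL/token values are illustrative):

```python
from buildgrid.server.app.settings.parser import get_logstream_connection_info

# Hypothetical logstream config, shaped like logstream-connection-options.
logstream = {
    "url": "https://logstream.example.com",
    "credentials": {"auth-token": "/path/to/token"},
}

# 0.2.53 returned a 3-tuple that included a fixed instance name:
#   url, credentials, instance = get_logstream_connection_info(logstream)

# 0.3.1 returns a 2-tuple; the instance name now travels with each job instead.
url, credentials = get_logstream_connection_info(logstream)
```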
@@ -472,7 +472,7 @@ definitions:
      priority-assignment-percentage: { type: number, minimum: 0 }
      metering-service-client: { "$ref": "#/definitions/metering-service-client" }
      metering-throttle-action: { type: string, enum: [deprioritize, reject] }
-      logstream: { "$ref": "#/definitions/grpc-connection-options" }
+      logstream: { "$ref": "#/definitions/logstream-connection-options" }
      asset-client: { "$ref": "#/definitions/asset-client" }
      action-browser-url: { type: string }
      poll-interval: { type: number, minimum: 0 }
@@ -645,12 +645,11 @@ definitions:
    properties:
      kind: { type: string, enum: [ "!asset-client" ] }
      url: { type: string }
-      instance-name: { type: string }
      channel-options: { type: object, propertyNames: { pattern: "^[a-z0-9-]+$" } }
      request-timeout: { type: number }
      credentials: { "$ref": "#/definitions/grpc-credentials" }
      retries: { type: number }
-    required: [kind, url, instance-name]
+    required: [kind, url]
 
  metering-service-client:
    type: object
@@ -783,16 +782,15 @@ definitions:
      retry-limit: { type: integer, min: 0 }
    required: [instance-name, index, high-watermark, low-watermark, batch-size]
 
-  grpc-connection-options:
+  logstream-connection-options:
    type: object
    propertyNames: { pattern: "^[A-Za-z0-9-]*$" }
    properties:
      url: { type: string }
-      instance-name: { type: string }
      channel-options: { type: object, propertyNames: { pattern: "^[a-z0-9-]+$" } }
      request-timeout: { type: number }
      credentials: { "$ref": "#/definitions/grpc-credentials" }
-    required: [url, instance-name]
+    required: [url]
 
  grpc-credentials:
    type: object
@@ -17,8 +17,7 @@
 SQLIndex
 ==================
 
-A SQL index implementation. This can be pointed to either a remote SQL server
-or a local SQLite database.
+A SQL index implementation. This must be pointed to a remote SQL server.
 
 """
 
@@ -35,6 +34,8 @@ from sqlalchemy.orm import InstrumentedAttribute, Session, load_only
 from sqlalchemy.orm.exc import StaleDataError
 from sqlalchemy.orm.query import Query
 from sqlalchemy.orm.session import Session as SessionType
+from sqlalchemy.dialects.postgresql import insert
+from sqlalchemy.sql.functions import coalesce
 
 from buildgrid._protos.build.bazel.remote.execution.v2.remote_execution_pb2 import Digest
 from buildgrid._protos.google.rpc import code_pb2
@@ -49,10 +50,8 @@ from buildgrid.server.sql.provider import SqlProvider
 
 from ..storage_abc import StorageABC
 from .index_abc import IndexABC
-from .sql_dialect_delegates import PostgreSQLDelegate, SQLiteDelegate
 
 LOGGER = buildgrid_logger(__name__)
-DIALECT_DELEGATES = {"postgresql": PostgreSQLDelegate, "sqlite": SQLiteDelegate}
 
 INLINE_BLOB_SIZE_HARD_MAXIMUM = 1000000000
 
@@ -151,9 +150,6 @@ class SQLIndex(IndexABC):
             unknown_args = kwargs_keys - available_options
             raise TypeError(f"Unknown keyword arguments: [{unknown_args}]")
 
-        # Dialect-specific initialization
-        self._dialect_delegate = DIALECT_DELEGATES.get(self._sql.dialect)
-
        if inclause_limit > 0:
            if inclause_limit > window_size:
                LOGGER.warning(
@@ -377,14 +373,32 @@ class SQLIndex(IndexABC):
 
        digest_blob_pairs = sorted(digest_blob_pairs, key=lambda pair: (pair[0].hash, pair[0].size_bytes))
 
-        if self._dialect_delegate:
-            try:
-                self._dialect_delegate._save_digests_to_index(  # type: ignore
-                    digest_blob_pairs, session, self._max_inline_blob_size
-                )
-                return
-            except AttributeError:
-                pass
+        # See discussion of __table__ typing in https://github.com/sqlalchemy/sqlalchemy/issues/9130
+        index_table = cast(Table, IndexEntry.__table__)
+        update_time = datetime.utcnow()
+        new_rows = [
+            {
+                "digest_hash": digest.hash,
+                "digest_size_bytes": digest.size_bytes,
+                "accessed_timestamp": update_time,
+                "inline_blob": (blob if digest.size_bytes <= self._max_inline_blob_size else None),
+                "deleted": False,
+            }
+            for (digest, blob) in digest_blob_pairs
+        ]
+
+        base_insert_stmt = insert(index_table).values(new_rows)
+
+        update_stmt = base_insert_stmt.on_conflict_do_update(
+            index_elements=["digest_hash"],
+            set_={
+                "accessed_timestamp": update_time,
+                "inline_blob": coalesce(base_insert_stmt.excluded.inline_blob, index_table.c.inline_blob),
+                "deleted": False,
+            },
+        )
+
+        session.execute(update_stmt)
 
        update_time = datetime.utcnow()
        # Figure out which digests we can just update
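With the dialect delegates deleted (see the removed `sql_dialect_delegates` files at the bottom of this diff), the PostgreSQL upsert is now inlined. Here is a standalone sketch of the same `INSERT ... ON CONFLICT DO UPDATE` pattern against a toy table, so the `coalesce(excluded, existing)` trick is visible in isolation; the table and column names are illustrative stand-ins, not buildgrid's actual model:

```python
from datetime import datetime

from sqlalchemy import Column, DateTime, LargeBinary, MetaData, String, Table
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.sql.functions import coalesce

metadata = MetaData()
# Toy stand-in for buildgrid's IndexEntry table.
index = Table(
    "index",
    metadata,
    Column("digest_hash", String, primary_key=True),
    Column("accessed_timestamp", DateTime),
    Column("inline_blob", LargeBinary, nullable=True),
)

base_stmt = insert(index).values(
    [{"digest_hash": "abc123", "accessed_timestamp": datetime.utcnow(), "inline_blob": None}]
)
# On conflict, refresh the access time but keep an existing inline_blob when the
# incoming row carries None -- the same coalesce(excluded, existing) trick as above.
upsert_stmt = base_stmt.on_conflict_do_update(
    index_elements=["digest_hash"],
    set_={
        "accessed_timestamp": base_stmt.excluded.accessed_timestamp,
        "inline_blob": coalesce(base_stmt.excluded.inline_blob, index.c.inline_blob),
    },
)
# session.execute(upsert_stmt) emits:
#   INSERT ... ON CONFLICT (digest_hash) DO UPDATE SET ...
```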
@@ -26,8 +26,7 @@ from io import BytesIO
 from typing import IO, Any, Iterator, Sequence, TypedDict, cast
 
 from sqlalchemy import CursorResult, delete, func, select
-from sqlalchemy.dialects.postgresql import insert as postgresql_insert
-from sqlalchemy.dialects.sqlite import insert as sqlite_insert
+from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.exc import DBAPIError
 from sqlalchemy.orm.exc import StaleDataError
 
@@ -59,7 +58,7 @@ class SQLStorage(StorageABC):
         self._sql_ro = sql_ro_provider or sql_provider
         self._inclause_limit = self._sql.default_inlimit
 
-        supported_dialects = ["postgresql", "sqlite"]
+        supported_dialects = ["postgresql"]
 
         if self._sql.dialect not in supported_dialects:
             raise RuntimeError(
@@ -71,14 +70,6 @@
         with self._sql.session() as session:
             session.query(BlobEntry).first()
 
-    def _sqlite_bulk_insert(self, new_rows: list[DigestRow]) -> None:
-        with self._sql.session() as session:
-            session.execute(sqlite_insert(BlobEntry).values(new_rows).on_conflict_do_nothing())
-
-    def _postgresql_bulk_insert(self, new_rows: list[DigestRow]) -> None:
-        with self._sql.session() as session:
-            session.execute(postgresql_insert(BlobEntry).values(new_rows).on_conflict_do_nothing())
-
     def _bulk_insert(self, digests: list[tuple[Digest, bytes]]) -> None:
         # Sort digests by hash to ensure consistent order to minimize deadlocks
         # when BatchUpdateBlobs requests have overlapping blobs
@@ -87,12 +78,8 @@
             for (digest, blob) in sorted(digests, key=lambda x: x[0].hash)
         ]
 
-        if self._sql.dialect == "sqlite":
-            self._sqlite_bulk_insert(new_rows)
-        elif self._sql.dialect == "postgresql":
-            self._postgresql_bulk_insert(new_rows)
-        else:
-            raise RuntimeError(f"Unsupported dialect {self._sql.dialect} for bulk_insert")
+        with self._sql.session() as session:
+            session.execute(insert(BlobEntry).values(new_rows).on_conflict_do_nothing())
 
     @timed(METRIC.STORAGE.STAT_DURATION, type=TYPE)
     def has_blob(self, digest: Digest) -> bool:
@@ -41,13 +41,11 @@ class AssetClient:
     def __init__(
         self,
         channel: grpc.Channel,
-        instance_name: str,
         retries: int = 0,
         max_backoff: int = 64,
         should_backoff: bool = True,
     ) -> None:
         self._channel = channel
-        self._instance_name = instance_name
         self._push_stub = PushStub(channel)
         self._fetch_stub = FetchStub(channel)
         self._retrier = GrpcRetrier(retries=retries, max_backoff=max_backoff, should_backoff=should_backoff)
@@ -68,6 +66,7 @@ class AssetClient:
         expire_at: datetime | None = None,
         referenced_blobs: Iterable[Digest] = [],
         referenced_directories: Iterable[Digest] = [],
+        instance_name: str,
     ) -> PushBlobResponse:
         def _push_blob() -> PushBlobResponse:
             qualifiers_pb = [Qualifier(name=name, value=value) for name, value in qualifiers.items()]
@@ -77,7 +76,7 @@
                 expire_at_pb.FromDatetime(expire_at)
 
             request = PushBlobRequest(
-                instance_name=self._instance_name,
+                instance_name=instance_name,
                 uris=uris,
                 qualifiers=qualifiers_pb,
                 expire_at=expire_at_pb,
@@ -98,6 +97,7 @@
         expire_at: datetime | None = None,
         referenced_blobs: Iterable[Digest] = [],
         referenced_directories: Iterable[Digest] = [],
+        instance_name: str,
     ) -> PushDirectoryResponse:
         def _push_directory() -> PushDirectoryResponse:
             qualifiers_pb = [Qualifier(name=name, value=value) for name, value in qualifiers.items()]
@@ -107,7 +107,7 @@
                 expire_at_pb.FromDatetime(expire_at)
 
             request = PushDirectoryRequest(
-                instance_name=self._instance_name,
+                instance_name=instance_name,
                 uris=uris,
                 qualifiers=qualifiers_pb,
                 expire_at=expire_at_pb,
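`AssetClient` no longer binds an instance at construction; every `push_blob`/`push_directory` call names its instance, so one client can serve many instances over a single channel. A hedged call-site sketch (only the constructor change is taken directly from this diff; the channel endpoint and the commented push call are illustrative):

```python
import grpc

from buildgrid.server.client.asset import AssetClient

channel = grpc.insecure_channel("remote-asset.example.com:50051")  # illustrative endpoint

# 0.2.53: AssetClient(channel=channel, instance_name="dev", retries=3)
client = AssetClient(channel=channel, retries=3)

# Each push now names the instance explicitly (other arguments elided):
#   client.push_blob(uris=[...], qualifiers={...}, ..., instance_name="dev")
```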
@@ -31,7 +31,7 @@ from google.protobuf.internal.containers import RepeatedCompositeFieldContainer
 from google.protobuf.timestamp_pb2 import Timestamp
 from grpc import Channel
 from sqlalchemy import ColumnExpressionArgument, CursorResult, and_, delete, func, insert, or_, select, text, update
-from sqlalchemy.dialects import postgresql, sqlite
+from sqlalchemy.dialects import postgresql
 from sqlalchemy.exc import IntegrityError
 from sqlalchemy.orm import Session, joinedload
 from sqlalchemy.sql.expression import Insert, Select
@@ -218,7 +218,6 @@ class Scheduler:
         metering_throttle_action: MeteringThrottleAction | None = None,
         bot_session_keepalive_timeout: int = 600,
         logstream_channel: Channel | None = None,
-        logstream_instance: str | None = None,
         asset_client: AssetClient | None = None,
         queued_action_retention_hours: float | None = None,
         completed_action_retention_hours: float | None = None,
@@ -268,7 +267,6 @@ class Scheduler:
         self.metering_throttle_action = metering_throttle_action or MeteringThrottleAction.DEPRIORITIZE
         self.bot_session_keepalive_timeout = bot_session_keepalive_timeout
         self.logstream_channel = logstream_channel
-        self.logstream_instance = logstream_instance
         self.asset_client = asset_client
         self.queued_action_retention_hours = queued_action_retention_hours
         self.completed_action_retention_hours = completed_action_retention_hours
@@ -414,7 +412,9 @@ class Scheduler:
             LOGGER.exception("Checking ActionCache for action failed.", tags=dict(digest=action_digest))
 
         # Extend retention for action
-        self._update_action_retention(action, action_digest, self.queued_action_retention_hours)
+        self._update_action_retention(
+            action, action_digest, self.queued_action_retention_hours, instance_name=current_instance()
+        )
 
         return self.create_operation_for_new_job(
             action=action,
@@ -760,11 +760,10 @@ class Scheduler:
         )
 
     def _notify_job_updated(self, job_names: str | list[str], session: Session) -> None:
-        if self._sql.dialect == "postgresql":
-            if isinstance(job_names, str):
-                job_names = [job_names]
-            for job_name in job_names:
-                session.execute(text(f"NOTIFY {NotificationChannel.JOB_UPDATED.value}, '{job_name}';"))
+        if isinstance(job_names, str):
+            job_names = [job_names]
+        for job_name in job_names:
+            session.execute(text(f"NOTIFY {NotificationChannel.JOB_UPDATED.value}, '{job_name}';"))
 
     def _get_operation(self, operation_name: str, session: Session) -> OperationEntry | None:
         statement = (
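`_notify_job_updated` now assumes PostgreSQL unconditionally. A self-contained sketch of the send side of this LISTEN/NOTIFY pattern through SQLAlchemy (the DSN, channel name, and payload are illustrative, not buildgrid's actual `NotificationChannel` values):

```python
from sqlalchemy import create_engine, text

engine = create_engine("postgresql://bgd:insecure@localhost:5432/bgd")  # illustrative DSN
with engine.begin() as conn:
    # NOTIFY is transactional in Postgres: listeners only see the payload
    # after COMMIT, which engine.begin() issues when the block exits.
    conn.execute(text("NOTIFY job_updated, 'job-1234';"))
```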
@@ -776,7 +775,7 @@
 
     def _batch_timeout_jobs(self, job_select_stmt: Select[Any], status_code: int, message: str) -> int:
         """Timeout all jobs selected by a query"""
-        with self._sql.session(sqlite_lock_immediately=True, exceptions_to_not_raise_on=[Exception]) as session:
+        with self._sql.session(exceptions_to_not_raise_on=[Exception]) as session:
             # Get the full list of jobs to timeout
             job_entries = session.execute(job_select_stmt).scalars().all()
             jobs = []
@@ -838,7 +837,7 @@
             .limit(1)
             .with_for_update(skip_locked=True)
         )
-        with self._sql.session(sqlite_lock_immediately=True, exceptions_to_not_raise_on=[Exception]) as session:
+        with self._sql.session(exceptions_to_not_raise_on=[Exception]) as session:
             job = session.execute(stale_job_statement).scalar_one_or_none()
             if not job:
                 return False
@@ -916,22 +915,14 @@
             job.cancelled = True
 
             # If the job was assigned to a bot, we need to update the quota / capacity
-            if self._sql.dialect == "postgresql":
-                update_query = (
-                    update(BotEntry)
-                    .where(BotEntry.bot_id == job.worker_name)
-                    .values(capacity=BotEntry.capacity + 1)
-                    .returning(BotEntry.cohort)
-                )
-                if cohort := session.execute(update_query).scalar_one_or_none():
-                    self._update_instance_quota_usage(session, cohort, job.instance_name, -1, guard=None)
-            else:
-                if bot := session.execute(
-                    select(BotEntry).where(BotEntry.bot_id == job.worker_name).with_for_update()
-                ).scalar_one_or_none():
-                    bot.capacity += 1
-                    if bot.cohort:
-                        self._update_instance_quota_usage(session, bot.cohort, job.instance_name, -1, guard=None)
+            update_query = (
+                update(BotEntry)
+                .where(BotEntry.bot_id == job.worker_name)
+                .values(capacity=BotEntry.capacity + 1)
+                .returning(BotEntry.cohort)
+            )
+            if cohort := session.execute(update_query).scalar_one_or_none():
+                self._update_instance_quota_usage(session, cohort, job.instance_name, -1, guard=None)
 
             session.add(
                 JobHistoryEntry(
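The surviving branch relies on `UPDATE ... RETURNING`, which PostgreSQL supports natively; the removed `else` branch emulated it for SQLite with `SELECT ... FOR UPDATE` plus an in-Python increment. A toy sketch of the one-round-trip pattern (table, DSN, and values are illustrative):

```python
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, update

metadata = MetaData()
# Toy stand-in for buildgrid's BotEntry table.
bots = Table(
    "bots",
    metadata,
    Column("bot_id", String, primary_key=True),
    Column("capacity", Integer),
    Column("cohort", String, nullable=True),
)

engine = create_engine("postgresql://bgd:insecure@localhost:5432/bgd")  # illustrative DSN
with engine.begin() as conn:
    # Bump the capacity and read back the cohort of the updated row in one statement.
    stmt = (
        update(bots)
        .where(bots.c.bot_id == "bot-1")
        .values(capacity=bots.c.capacity + 1)
        .returning(bots.c.cohort)
    )
    cohort = conn.execute(stmt).scalar_one_or_none()
```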
@@ -1157,8 +1148,7 @@
             self._notify_job_updated(job.name, session)
 
             LOGGER.debug("Assigned job to bot", tags=log_tags)
-            if self._sql.dialect == "postgresql":
-                session.execute(text(f"NOTIFY {NotificationChannel.JOB_ASSIGNED.value}, '{bot.name}';"))
+            session.execute(text(f"NOTIFY {NotificationChannel.JOB_ASSIGNED.value}, '{bot.name}';"))
 
     def _match_bot_by_sampling(
         self, session: Session, query: Select[tuple[BotEntry]], sampling: SamplingConfig
@@ -1523,7 +1513,7 @@
         )
 
         updated = False
-        with self._sql.session(sqlite_lock_immediately=True) as session:
+        with self._sql.session() as session:
             job = session.execute(job_statement).scalar_one_or_none()
             if job is not None:
                 self._match_job_to_bot(session, job, failure_backoff, bot_assignment_fn, assigner_name)
@@ -1558,7 +1548,7 @@
         )
 
         updated = False
-        with self._sql.session(sqlite_lock_immediately=True) as session:
+        with self._sql.session() as session:
             job = session.execute(job_statement).scalar_one_or_none()
             if job is not None:
                 self._match_job_to_bot(session, job, failure_backoff, bot_assignment_fn, assigner_name)
@@ -1702,22 +1692,8 @@
         return num_rows_deleted
 
     def _insert_on_conflict_do_nothing(self, model: type[OrmBase]) -> Insert:
-        # `Insert.on_conflict_do_nothing` is a SQLAlchemy "generative method", it
-        # returns a modified copy of the statement it is called on. For
-        # some reason mypy can't understand this, so the errors are ignored here.
-        if self._sql.dialect == "sqlite":
-            sqlite_insert: sqlite.Insert = sqlite.insert(model)
-            return sqlite_insert.on_conflict_do_nothing()
-
-        elif self._sql.dialect == "postgresql":
-            insertion: postgresql.Insert = postgresql.insert(model)
-            return insertion.on_conflict_do_nothing()
-
-        else:
-            # Fall back to the non-specific insert implementation. This doesn't
-            # support `ON CONFLICT DO NOTHING`, so callers need to be careful to
-            # still catch IntegrityErrors if other database backends are possible.
-            return insert(model)
+        insertion: postgresql.Insert = postgresql.insert(model)
+        return insertion.on_conflict_do_nothing()
 
     def get_or_create_client_identity_in_store(
         self, session: Session, client_id: ClientIdentityEntry
@@ -1744,7 +1720,7 @@
         try:
             session.execute(insertion)
 
-        # Handle unique constraint violation when using an unsupported database (ie. not PostgreSQL or SQLite)
+        # Handle unique constraint violation when using an unsupported database (i.e. not PostgreSQL)
         except IntegrityError:
             LOGGER.debug("Handled IntegrityError when inserting client identity.")
 
@@ -1777,7 +1753,7 @@
         try:
             session.execute(insertion)
 
-        # Handle unique constraint violation when using an unsupported database (ie. not PostgreSQL or SQLite)
+        # Handle unique constraint violation when using an unsupported database (i.e. not PostgreSQL)
         except IntegrityError:
             LOGGER.debug("Handled IntegrityError when inserting request metadata.")
 
@@ -1999,11 +1975,11 @@
         return None
 
     def _create_logstream_for_job(self, job: JobEntry, log_tags: Tags) -> None:
-        if self.logstream_channel and self.logstream_instance is not None:
+        if self.logstream_channel:
             try:
                 action_digest = string_to_digest(job.action_digest)
                 parent_base = f"{action_digest.hash}_{action_digest.size_bytes}_{int(time())}"
-                with logstream_client(self.logstream_channel, self.logstream_instance) as ls_client:
+                with logstream_client(self.logstream_channel, job.instance_name) as ls_client:
                     stdout_stream = ls_client.create(f"{parent_base}_stdout")
                     stderr_stream = ls_client.create(f"{parent_base}_stderr")
                     job.stdout_stream_name = stdout_stream.name
@@ -2527,9 +2503,12 @@
             Action.FromString(job.action),
             string_to_digest(job.action_digest),
             retention_hours=self.completed_action_retention_hours,
+            instance_name=job.instance_name,
         )
         if action_result.ByteSize() > 0:
-            self._update_action_result_retention(action_result, retention_hours=self.action_result_retention_hours)
+            self._update_action_result_retention(
+                action_result, retention_hours=self.action_result_retention_hours, instance_name=job.instance_name
+            )
 
         worker_duration = None
         if job.worker_start_timestamp is not None and job.worker_completed_timestamp is not None:
@@ -2773,7 +2752,9 @@
         except Exception as exc:
             LOGGER.exception("Cannot publish resource usage.", tags=dict(job_name=job_name), exc_info=exc)
 
-    def _update_action_retention(self, action: Action, action_digest: Digest, retention_hours: float | None) -> None:
+    def _update_action_retention(
+        self, action: Action, action_digest: Digest, retention_hours: float | None, instance_name: str
+    ) -> None:
         if not self.asset_client or not retention_hours:
             return
         uri = DIGEST_URI_TEMPLATE.format(digest_hash=action_digest.hash)
@@ -2790,6 +2771,7 @@
                 expire_at=expire_at,
                 referenced_blobs=referenced_blobs,
                 referenced_directories=referenced_directories,
+                instance_name=instance_name,
             )
             LOGGER.debug(
                 "Extended the retention of action.", tags=dict(digest=action_digest, retention_hours=retention_hours)
@@ -2798,7 +2780,9 @@
             LOGGER.exception("Failed to push action as an asset.", tags=dict(digest=action_digest))
             # Not a fatal path, don't reraise here
 
-    def _update_action_result_retention(self, action_result: ActionResult, retention_hours: float | None) -> None:
+    def _update_action_result_retention(
+        self, action_result: ActionResult, retention_hours: float | None, instance_name: str
+    ) -> None:
         if not self.asset_client or not retention_hours:
             return
         digest = None
@@ -2837,6 +2821,7 @@
                 expire_at=expire_at,
                 referenced_blobs=referenced_blobs,
                 referenced_directories=referenced_directories,
+                instance_name=instance_name,
             )
             LOGGER.debug(
                 "Extended the retention of action result.", tags=dict(digest=digest, retention_hours=retention_hours)
@@ -2853,18 +2838,9 @@
         if self.bot_locality_hint_limit == 0:
             return
 
-        # Insert new hint with dialect-specific seq handling
-        if self._sql.dialect == "postgresql":
-            # For PostgreSQL, use the sequence to get the next seq number
-            next_seq = None
-        else:
-            # For SQLite, manually find the max seq number
-            max_seq = session.execute(
-                select(func.coalesce(func.max(BotLocalityHintEntry.sequence_number), 0)).where(
-                    BotLocalityHintEntry.bot_name == bot_name
-                )
-            ).scalar_one()
-            next_seq = max_seq + 1
+        # Insert new hint with seq handling
+        # For PostgreSQL, use the sequence to get the next seq number
+        next_seq = None
 
         new_hint = BotLocalityHintEntry(
             bot_name=bot_name,
@@ -2969,9 +2945,6 @@
             # `greatest(0,_)` is needed if this feature is released when there are already running jobs
             # TODO: remove the safe-guard after the next minor version bump
             new_usage: Any = func.greatest(0, InstanceQuota.current_usage + delta)
-            if self._sql.dialect == "sqlite":
-                # SQLite does not support `greatest`, so we use a simpler update for it.
-                new_usage = InstanceQuota.current_usage + delta
 
             update_usage_query = (
                 update(InstanceQuota)
@@ -25,7 +25,7 @@ from sqlalchemy import select as sql_select
 from sqlalchemy.orm import Session
 
 from buildgrid.server.logging import buildgrid_logger
-from buildgrid.server.sql.models import BotEntry, JobEntry
+from buildgrid.server.sql.models import BotEntry
 from buildgrid.server.sql.provider import SqlProvider
 from buildgrid.server.threading import ContextWorker
 
@@ -79,10 +79,7 @@ class Notifier(Generic[T]):
         while not shutdown_requested.is_set():
             try:
                 with self._sql.session() as session:
-                    if self._sql.dialect == "postgresql":
-                        self._listen_for_updates(shutdown_requested, session)
-                    else:
-                        self._poll_for_updates(shutdown_requested, session)
+                    self._listen_for_updates(shutdown_requested, session)
             except Exception as e:
                 LOGGER.warning(
                     f"OperationsNotifier encountered exception: {e}.",
@@ -122,23 +119,6 @@ class Notifier(Generic[T]):
                 notify = dbapi_connection.notifies.pop()
                 self.notify(notify.payload)
 
-    def _poll(self, names: list[str], session: Session) -> dict[str, T]:
-        raise NotImplementedError()
-
-    def _poll_for_updates(self, shutdown_requested: Event, session: Session) -> None:
-        prev_data: dict[str, T] = {}
-        while not shutdown_requested.is_set():
-            with self._lock:
-                names = list(self._listeners)
-
-            next_data: dict[str, T] = self._poll(names, session)
-            for name in next_data:
-                if name not in prev_data or prev_data[name] != next_data[name]:
-                    self.notify(name)
-
-            prev_data = next_data
-            shutdown_requested.wait(timeout=self.poll_interval)
-
     def notify(self, listener_name: str) -> None:
         with self._lock:
             if listener_name in self._listeners:
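With the polling fallback deleted, the notifier depends entirely on the `LISTEN` path shown in the remaining context above (`dbapi_connection.notifies.pop()`). A standalone sketch of that receive side using plain psycopg2 — presumably the DBAPI driver in play, though the DSN and channel name here are illustrative:

```python
import select

import psycopg2
import psycopg2.extensions

conn = psycopg2.connect("dbname=bgd user=bgd")  # illustrative DSN
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cur:
    cur.execute("LISTEN job_updated;")  # illustrative channel name

while True:
    # Block until the connection's socket is readable, then drain the queue.
    if select.select([conn], [], [], 5.0) == ([], [], []):
        continue  # timed out; a real loop would check a shutdown flag here
    conn.poll()
    while conn.notifies:
        notify = conn.notifies.pop()
        print(f"update received: {notify.payload}")
```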
@@ -179,27 +159,11 @@ class OperationsNotifier(Notifier[tuple[bool, int]]):
         """
         super().__init__(sql_provider, NotificationChannel.JOB_UPDATED, "OperationsNotifier", poll_interval)
 
-    def _poll(self, names: list[str], session: Session) -> dict[str, tuple[bool, int]]:
-        # Only query for the minimal amount of data required.
-        # The subscribers can choose how they want to act (e.g. by querying the full job data).
-        statement = sql_select(JobEntry.name, JobEntry.cancelled, JobEntry.stage).where(JobEntry.name.in_(names))
-        next_data: dict[str, tuple[bool, int]] = {}
-        for [name, cancelled, stage] in session.execute(statement).all():
-            next_data[name] = (cancelled, stage)
-        return next_data
-
 
 class BotNotifier(Notifier[str]):
     def __init__(self, sql_provider: SqlProvider, poll_interval: float = 1.0) -> None:
         super().__init__(sql_provider, NotificationChannel.JOB_ASSIGNED, "BotNotifier", poll_interval)
 
-    def _poll(self, names: list[str], session: Session) -> dict[str, str]:
-        statement = sql_select(BotEntry.name, BotEntry.lease_id).where(BotEntry.name.in_(names))
-        next_data = {}
-        for name, lease_id in session.execute(statement).all():
-            next_data[name] = lease_id
-        return next_data
-
     def listener_count_for_instance(self, instance_name: str) -> int:
         with self._lock:
             stmt = sql_select(func.count(BotEntry.name)).where(
@@ -119,7 +119,6 @@ def upgrade() -> None:
         ["worker_completed_timestamp"],
         unique=False,
         postgresql_where=sa.text("worker_completed_timestamp IS NOT NULL"),
-        sqlite_where=sa.text("worker_completed_timestamp IS NOT NULL"),
     )
     op.create_index(
         "ix_worker_start_timestamp",
@@ -127,7 +126,6 @@ def upgrade() -> None:
         ["worker_start_timestamp"],
         unique=False,
         postgresql_where=sa.text("worker_start_timestamp IS NOT NULL"),
-        sqlite_where=sa.text("worker_start_timestamp IS NOT NULL"),
     )
     op.create_table(
         "platform_properties",
@@ -220,13 +218,11 @@ def downgrade() -> None:
         "ix_worker_start_timestamp",
         table_name="jobs",
         postgresql_where=sa.text("worker_start_timestamp IS NOT NULL"),
-        sqlite_where=sa.text("worker_start_timestamp IS NOT NULL"),
     )
     op.drop_index(
         "ix_worker_completed_timestamp",
         table_name="jobs",
         postgresql_where=sa.text("worker_completed_timestamp IS NOT NULL"),
-        sqlite_where=sa.text("worker_completed_timestamp IS NOT NULL"),
     )
     op.drop_index(op.f("ix_jobs_worker_name"), table_name="jobs")
     op.drop_index("ix_jobs_stage_property_label", table_name="jobs")
@@ -37,7 +37,8 @@ from buildgrid.server.enums import LeaseState, OperationStage
 
 bigint = Annotated[int, "bigint"]
 # This gives us something to reference in the type_annotation_map to specify the JSONB variant when
-# using postgresql. When we drop SQLite support this won't be necessary versus just using JSONB in
+# using postgresql.
+# TODO: now that SQLite support has been dropped, this won't be necessary versus just using JSONB in
 # the model directly.
 json = Annotated[JSON, "json"]
 
@@ -133,7 +134,6 @@ class JobEntry(Base):
             "worker_completed_timestamp",
             unique=False,
             postgresql_where=worker_completed_timestamp.isnot(None),
-            sqlite_where=worker_completed_timestamp.isnot(None),
         ),
         Index(
             "ix_jobs_property_label_stage",
@@ -15,11 +15,10 @@
 
 from contextlib import contextmanager
 from datetime import timedelta
-from tempfile import NamedTemporaryFile
 from threading import Lock
 from typing import Any, Generator, Iterator
 
-from sqlalchemy import create_engine, event, text
+from sqlalchemy import create_engine
 from sqlalchemy.engine import Engine
 from sqlalchemy.orm import Session, scoped_session, sessionmaker
 from sqlalchemy.pool import NullPool
@@ -37,9 +36,8 @@ from buildgrid.server.sql.models import Base
 
 from .utils import (
     SQLPoolDisposeHelper,
-    is_psycopg2_connection_string,
-    is_sqlite_connection_string,
-    is_sqlite_inmemory_connection_string,
+    is_postgresql_connection_string,
+    USE_POSTGRES_MESSAGE,
 )
 
 LOGGER = buildgrid_logger(__name__)
@@ -47,15 +45,13 @@ LOGGER = buildgrid_logger(__name__)
 # Each dialect has a limit on the number of bind parameters allowed. This
 # matters because it determines how large we can allow our IN clauses to get.
 #
-# SQLite: 1000 https://www.sqlite.org/limits.html#max_variable_number
 # PostgreSQL: 32767 (Int16.MAX_VALUE) https://www.postgresql.org/docs/9.4/protocol-message-formats.html
 #
 # We'll refer to this as the "inlimit" in the code. The inlimits are
 # set to 75% of the bind parameter limit of the implementation.
-DIALECT_INLIMIT_MAP = {"postgresql": 24000, "sqlite": 750}
+DIALECT_INLIMIT_MAP = {"postgresql": 24000}
 DEFAULT_INLIMIT = 100
 
-
 # NOTE: Obviously these type annotations are useless, but sadly they're what
 # is in the upstream sqlalchemy2-stubs[0].
 #
@@ -64,24 +60,6 @@ DEFAULT_INLIMIT = 100
 #
 # [0]: https://github.com/sqlalchemy/sqlalchemy2-stubs/blob/main/sqlalchemy-stubs/pool/events.pyi#L9
 # [1]: https://github.com/sqlalchemy/sqlalchemy/blob/main/lib/sqlalchemy/pool/events.py#L96-L100
-def _sqlite_on_connect(conn: Any, record: Any) -> None:
-    """SQLite ``PRAGMA`` statements to execute immediately upon connection.
-
-    These statements configure the behaviour of the database, and are specific
-    to SQLite.
-
-    See https://www.sqlite.org/pragma.html for details.
-
-    Args:
-        conn (DBAPIConnection): The DBAPI connection that was just connected.
-        record (_ConnectionRecord): The connection record which contains the
-            DBAPI connection.
-
-    """
-    # Use journal_mode=WAL to allow read/write concurrency, as well as the
-    # performance improvements it brings.
-    conn.execute("PRAGMA journal_mode=WAL")
-    conn.execute("PRAGMA synchronous=NORMAL")
 
 
 class SqlProvider:
@@ -93,17 +71,14 @@ class SqlProvider:
 
     Args:
         connection_string (str | None): The connection string to use when
-            creating a database connection. If ``None`` then a temporary
-            SQLite database will be created for the lifetime of this
-            object.
+            creating a database connection. Must point to a valid PostgreSQL database.
 
         connection_timeout (int): The timeout to use when attempting to
             connect to the database, in seconds. Defaults to 5 seconds if
             unset.
 
         lock_timeout (int): The timeout to use when the connection
-            holds a lock in the database. This is supported only if the database
-            backend is PostgresQL.
+            holds a lock in the database.
 
         connect_args (dict[str, Any] | None): Dictionary of DBAPI
             connection arguments to pass to the engine. See the
@@ -132,7 +107,7 @@
             publishing.
 
     Raises:
-        ValueError: when ``connection_string`` specifies an in-memory SQLite
+        ValueError: when ``connection_string`` doesn't specify a PostgreSQL
             database.
 
     .. _docs: https://docs.sqlalchemy.org/en/14/core/engines.html#use-the-connect-args-dictionary-parameter
@@ -155,16 +130,10 @@
     ):
         """Initialize an SqlProvider."""
         self._database_tempfile = None
-        # If we don't have a connection string, we'll make a tempfile to use
-        # as an SQLite database. This tempfile needs to exist for the lifetime
-        # of the SqlProvider.
+        # If we don't have a connection string, raise a ValueError carrying some
+        # info about setting up a postgres database.
        if not connection_string:
-            self._database_tempfile = NamedTemporaryFile(prefix="bgd-", suffix=".db")
-            LOGGER.warning(
-                "No connection string specified for the SQL provider, will use SQLite with tempfile.",
-                tags=dict(tempfile=self._database_tempfile.name),
-            )
-            connection_string = f"sqlite:///{self._database_tempfile.name}"
+            raise ValueError(f"No connection string specified for the SQL provider\n\n{USE_POSTGRES_MESSAGE}")
 
        # Set up database connection
        self._session_factory = sessionmaker(future=True)
@@ -224,8 +193,7 @@
             is applied.
 
             lock_timeout (int): The timeout to use when the connection
-                holds a lock in the database. This is supported only if the database
-                backend is PostgresQL.
+                holds a lock in the database.
 
             connect_args: Dictionary of DBAPI
                 connection arguments to pass to the engine. See the
@@ -257,35 +225,24 @@
             database defined by ``connection_string``.
 
         Raises:
-            ValueError: when attempting to connect to an in-memory SQLite
+            ValueError: when attempting to connect to a non-PostgreSQL
                 database.
 
         .. _docs: https://docs.sqlalchemy.org/en/14/core/engines.html#use-the-connect-args-dictionary-parameter
 
         """
+        # Disallow sqlite for the scheduler db:
+        # there's no reason to support a non-production-ready scheduler implementation.
+
        # Disallow sqlite in-memory because multi-threaded access to it is
        # complex and potentially problematic at best
        # ref: https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#threading-pooling-behavior
-        if is_sqlite_inmemory_connection_string(connection_string):
-            raise ValueError(
-                "Cannot use SQLite in-memory with BuildGrid "
-                f"(connection_string=[{connection_string}]). Use a file or "
-                "leave the connection_string empty for a tempfile."
-            )
 
-        # Deprecate sqlite for the scheduler db
-        if is_sqlite_connection_string(connection_string):
-            LOGGER.warning(
-                "\n\n"
-                "Using deprecated dialect SQLite.\n"
-                "\n"
-                "Deployments, CI pipelines and developer setups will need to switch any SQLite storage"
-                "(cas storage, cas index, job scheduler) over to Postgresql before updating.\n"
-                "\n"
-                "For non production use a preconfigured docker buildgrid database is available from:"
-                "registry.gitlab.com/buildgrid/buildgrid.hub.docker.com/buildgrid-postgres:nightly"
-                "an example compose file for the database can be found at "
-                "https://gitlab.com/BuildGrid/buildgrid.hub.docker.com/-/blob/master/Composefile.buildbox.yml?ref_type=heads\n"  # noqa: E501
+        # Ensure only postgres is supported
+
+        if not is_postgresql_connection_string(connection_string):
+            raise ValueError(
+                f"Cannot use database (connection_string=[{connection_string}]).\n\n{USE_POSTGRES_MESSAGE}"
             )
 
        extra_engine_args: dict[str, Any] = {}
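In practice, then, constructing the provider under 0.3.1 looks like this hedged sketch (the DSN is illustrative; the keyword name follows the docstring above):

```python
from buildgrid.server.sql.provider import SqlProvider

# 0.2.53 would fall back to a tempfile-backed SQLite database when no
# connection string was given; 0.3.1 raises ValueError carrying
# USE_POSTGRES_MESSAGE instead, for both missing and non-postgres strings.
provider = SqlProvider(connection_string="postgresql://bgd:insecure@localhost:5432/bgd")

with provider.session() as session:
    ...  # transactional work; rolled back automatically if the block raises
```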
@@ -295,12 +252,9 @@
             extra_engine_args["connect_args"] = {}
 
         if connection_timeout > 0:
-            if is_sqlite_connection_string(connection_string):
-                extra_engine_args["connect_args"]["timeout"] = connection_timeout
-            elif is_psycopg2_connection_string(connection_string):
-                extra_engine_args["connect_args"]["connect_timeout"] = connection_timeout
-        if lock_timeout > 0 and is_psycopg2_connection_string(connection_string):
-            # Additional postgres specific timeouts
+            extra_engine_args["connect_args"]["connect_timeout"] = connection_timeout
+        if lock_timeout > 0:
+            # Additional timeouts
             # Additional libpg options
             # Note that those timeouts are in milliseconds (so *1000)
             # User might specifically set options... do not override in this case.
@@ -326,10 +280,6 @@
         engine = create_engine(connection_string, echo=False, future=True, **extra_engine_args)
         self._session_factory.configure(bind=engine)
 
-        # Register sqlite-specific connection callback.
-        if engine.dialect.name == "sqlite":
-            event.listen(engine, "connect", _sqlite_on_connect)
-
         return engine
 
     @property
@@ -347,7 +297,6 @@
         self,
         *,
         scoped: bool = False,
-        sqlite_lock_immediately: bool = False,
         exceptions_to_not_raise_on: list[type[Exception]] | None = None,
         exceptions_to_not_rollback_on: list[type[Exception]] | None = None,
         expire_on_commit: bool = True,
@@ -362,10 +311,6 @@
         if the underlying connection pool has recently been disposed of and
         refreshed due to connectivity issues.
 
-        When ``sqlite_lock_immediately`` is ``True``, the Session will not
-        yield until the database has been locked by entering into a write
-        transaction when using SQLite.
-
         If an Exception is raised whilst in the managed context, the ongoing
         database transaction is rolled back, and the Exception is reraised.
         Some Exceptions which suggest a transient connection issue with the
@@ -386,13 +331,6 @@
             session. This results in reuse of the underlying Session object
             in a given thread.
 
-            sqlite_lock_immediately: If true, execute a ``BEGIN IMMEDIATE``
-                statement as soon as the session is created when using SQLite.
-                This allows locking for the lifetime of the ``Session`` within
-                this ContextManager, enabling similar behaviour to
-                ``SELECT ... FOR UPDATE`` in other dialects. Defaults to
-                ``False``.
-
             exceptions_to_not_raise_on: The list of error types to be suppressed
                 within the context rather than re-raised. Defaults to ``None``,
                 meaning all SQLAlchemyErrors will be re-raised.
@@ -442,8 +380,6 @@
         # Try to obtain a session
         try:
             session = factory() if scoped else factory(expire_on_commit=expire_on_commit)
-            if sqlite_lock_immediately and session.bind.name == "sqlite":  # type: ignore
-                session.execute(text("BEGIN IMMEDIATE"))
         except Exception as e:
             LOGGER.error("Unable to obtain a database session.", exc_info=True)
             raise DatabaseError("Unable to obtain a database session.") from e
@@ -489,7 +425,6 @@
     def scoped_session(
         self,
         *,
-        sqlite_lock_immediately: bool = False,
         exceptions_to_not_raise_on: list[type[Exception]] | None = None,
         exceptions_to_not_rollback_on: list[type[Exception]] | None = None,
     ) -> Generator[Session, None, None]:
@@ -512,7 +447,6 @@
         """
         with self.session(
             scoped=True,
-            sqlite_lock_immediately=sqlite_lock_immediately,
             exceptions_to_not_raise_on=exceptions_to_not_raise_on,
             exceptions_to_not_rollback_on=exceptions_to_not_rollback_on,
         ) as session:
@@ -89,14 +89,16 @@ LIST_OPERATIONS_SORT_KEYS = {
     "command": SortKeySpec("command", JobEntry.__tablename__),
 }
 
-
-def is_sqlite_connection_string(connection_string: str) -> bool:
-    if connection_string:
-        return connection_string.startswith("sqlite")
-    return False
+USE_POSTGRES_MESSAGE = (
+    "For production use, set up a PostgreSQL database.\n"
+    "For CI and local development use the preconfigured docker buildgrid database from:\n"
+    "registry.gitlab.com/buildgrid/buildgrid.hub.docker.com/buildgrid-postgres:nightly\n"
+    "An example compose file for the database can be found at\n"
+    "https://gitlab.com/BuildGrid/buildgrid.hub.docker.com/-/blob/master/Composefile.buildbox.yml?ref_type=heads\n"  # noqa: E501
+)
 
 
-def is_psycopg2_connection_string(connection_string: str) -> bool:
+def is_postgresql_connection_string(connection_string: str) -> bool:
     if connection_string:
         if connection_string.startswith("postgresql:"):
             return True
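A quick sketch of what the renamed check accepts, based on the prefix test visible above; the driver-qualified form is an assumption about the elided remainder of the function body:

```python
from buildgrid.server.sql.utils import is_postgresql_connection_string

assert is_postgresql_connection_string("postgresql://bgd:insecure@database/bgd")
assert is_postgresql_connection_string("postgresql+psycopg2://bgd:insecure@database/bgd")  # assumed
assert not is_postgresql_connection_string("sqlite:///bgd.db")  # rejected in 0.3.1
assert not is_postgresql_connection_string("")
```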
@@ -105,35 +107,6 @@ def is_psycopg2_connection_string(connection_string: str) -> bool:
     return False
 
 
-def is_sqlite_inmemory_connection_string(full_connection_string: str) -> bool:
-    if is_sqlite_connection_string(full_connection_string):
-        # Valid connection_strings for in-memory SQLite which we don't support could look like:
-        # "sqlite:///file:memdb1?option=value&cache=shared&mode=memory",
-        # "sqlite:///file:memdb1?mode=memory&cache=shared",
-        # "sqlite:///file:memdb1?cache=shared&mode=memory",
-        # "sqlite:///file::memory:?cache=shared",
-        # "sqlite:///file::memory:",
-        # "sqlite:///:memory:",
-        # "sqlite:///",
-        # "sqlite://"
-        # ref: https://www.sqlite.org/inmemorydb.html
-        # Note that a user can also specify drivers, so prefix could become 'sqlite+driver:///'
-        connection_string = full_connection_string
-
-        uri_split_index = connection_string.find("?")
-        if uri_split_index != -1:
-            connection_string = connection_string[0:uri_split_index]
-
-        if connection_string.endswith((":memory:", ":///", "://")):
-            return True
-        elif uri_split_index != -1:
-            opts = full_connection_string[uri_split_index + 1 :].split("&")
-            if "mode=memory" in opts:
-                return True
-
-    return False
-
-
 class SQLPoolDisposeHelper:
     """Helper class for disposing of SQL session connections"""
 
@@ -13,4 +13,4 @@
 # limitations under the License.
 
 
-__version__ = "0.2.53"
+__version__ = "0.3.1"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: buildgrid
-Version: 0.2.53
+Version: 0.3.1
 Summary: A remote execution service
 License: Apache License, Version 2.0
 Project-URL: Homepage, https://buildgrid.build
@@ -42,7 +42,7 @@ Requires-Dist: fakeredis>=2.10.1; extra == "redis"
 Requires-Dist: redis>=4.5.1; extra == "redis"
 Requires-Dist: hiredis; extra == "redis"
 Provides-Extra: docs
-Requires-Dist: Sphinx; extra == "docs"
+Requires-Dist: Sphinx<=8; extra == "docs"
 Requires-Dist: sphinx-click; extra == "docs"
 Requires-Dist: sphinx-rtd-theme; extra == "docs"
 Requires-Dist: sphinxcontrib-apidoc; extra == "docs"
@@ -77,6 +77,7 @@ Requires-Dist: pytest-xdist; extra == "dev"
 Requires-Dist: memray; extra == "dev"
 Requires-Dist: ruff; extra == "dev"
 Requires-Dist: grpcio-tools; extra == "dev"
+Requires-Dist: bump4version; extra == "dev"
 Provides-Extra: mypy
 Requires-Dist: mypy; extra == "mypy"
 Requires-Dist: SQLAlchemy[mypy]; extra == "mypy"
@@ -208,7 +208,7 @@ buildgrid/server/servicer.py,sha256=oqU9MaSxxHTDmSxobFTo9YmJctaUCklE2Dj-vfYWKkc,
 buildgrid/server/settings.py,sha256=Iy4eS9Putr5GroIVqYNeTcRY7gbvq29wgQoMDQgPJtA,5604
 buildgrid/server/threading.py,sha256=4QKQYev2KoO2Q-S_OyaoR9qpWyDTVzGMWVe9o2a1yIU,4743
 buildgrid/server/types.py,sha256=xG3bx64pbWMuEwXLuI0o8c2unt2rU2C4zsmUfmMT12c,1323
-buildgrid/server/version.py,sha256=zv7pfyNMnfQiih7uFjY-_UFi2yhATpDRA43o8Nz28fo,604
+buildgrid/server/version.py,sha256=n9eZStzsUHVKCm33O5kD4RgMgRgIUVgOkmGO0lowsqw,603
 buildgrid/server/actioncache/__init__.py,sha256=g9lb8Sn7NY5KOjkMr9GQoJovCVDEg_Fxz_EhdDbhP1I,579
 buildgrid/server/actioncache/instance.py,sha256=UCR7ZGkv4fJOXjeIILMAdTSFWcGgBSYlBg8fMaPJpaI,3139
 buildgrid/server/actioncache/service.py,sha256=WcikJAzFYOYX-tgiOfGGcOnPoubrCd4yP-EhKCHEW0c,2021
@@ -239,9 +239,9 @@ buildgrid/server/app/commands/rpc_utils.py,sha256=3C02_0Ba4Weksb2kX5mANSubmfWAFR
 buildgrid/server/app/settings/__init__.py,sha256=g9lb8Sn7NY5KOjkMr9GQoJovCVDEg_Fxz_EhdDbhP1I,579
 buildgrid/server/app/settings/config.py,sha256=3cmmnkvZtM44zXkc6meNpmA6rJoDBbrridHoj-Quapo,9649
 buildgrid/server/app/settings/mapper.py,sha256=sKD3LWyVXZWaFF16_JN1lbJkUE8vp5p06OJGOencxmU,752
-buildgrid/server/app/settings/parser.py,sha256=8hIlclUweGO9kcWdOEy9f_TeEshofjJhH56dwnKXFak,96742
+buildgrid/server/app/settings/parser.py,sha256=i8B1Zy6sR2o3n4z8H_e_QlszMTKOiLFPxonVE25MkOo,96439
 buildgrid/server/app/settings/reference.yml,sha256=DTXNJ6TkccgBLcvzj5264XAv6HFlvsc5Lx6iowixHPE,10703
-buildgrid/server/app/settings/schema.yml,sha256=1Jgvo-ruEVXQhDK_g323-OjSV7-QGZ_D_86ch-7pZCs,32765
+buildgrid/server/app/settings/schema.yml,sha256=DqWE6UaNYiJl0Ae5DSmDtU8CijLXeXAeSK9Qal0jwTA,32664
 buildgrid/server/auth/__init__.py,sha256=avyRxG9BSFd76mCHnGYInDDTqyqDn0UlhOuFzAkPQSs,579
 buildgrid/server/auth/config.py,sha256=sWcaSL9Oz7mLVnFBuFT7OaOGlxFd6hxEb-z30n3uTNU,2291
 buildgrid/server/auth/enums.py,sha256=O902XU3_C4rqWz23qVipEyQMcXXr1h8XCiPWmrs1_lc,1905
@@ -268,16 +268,13 @@ buildgrid/server/cas/storage/replicated.py,sha256=DF_oku2QJAigiRTz6358ZBy4LzQiIH
 buildgrid/server/cas/storage/s3.py,sha256=fHsbNBYBN6x2DnlG22_UA4GMjcqGEMlOU6yIHueE5mc,20058
 buildgrid/server/cas/storage/sharded.py,sha256=WYehvpn1AD-pvGsZDjzIZQRLjCyw7eEEjHymYoMzg2Q,7076
 buildgrid/server/cas/storage/size_differentiated.py,sha256=puT7xMhT_0T1hKGJf_kjjbCcYsmfhQnsEvsRClmk59Y,8223
-buildgrid/server/cas/storage/sql.py,sha256=T4huxHnZMxR5JnalonzSI45H_tWgICxF9dl3LOjWpYo,10274
+buildgrid/server/cas/storage/sql.py,sha256=ERiHfqF9DA1Tu75QxHP2okAtQB3oA-DCFA95D6KQ6Js,9589
 buildgrid/server/cas/storage/storage_abc.py,sha256=BTLNiAr31amzz22mcLy8ctM9lCcNypL79iD3hPmIUMI,7728
 buildgrid/server/cas/storage/with_cache.py,sha256=IB-pq5S6R6V037RQiS4jGk9Jm5Wj_Qdy7WChs0xiDqI,8132
 buildgrid/server/cas/storage/index/__init__.py,sha256=adgShFqjP778F2KJoM-z7stkT9V_3BPuc3uodlf1buw,579
 buildgrid/server/cas/storage/index/index_abc.py,sha256=JaFHjnePcazCWqCwsrlBYybArc4d4KBM6Utv4xyK7gI,3189
 buildgrid/server/cas/storage/index/redis.py,sha256=nJqZ9HEk65zxNnrD-7sQ6aS5PGy5l55p6-Rs-oSRSZY,16556
-buildgrid/server/cas/storage/index/sql.py,sha256=EAYZ9rhbaLbPDtu7m0At5wmxn5-If99sBY16fA3KSZI,40485
-buildgrid/server/cas/storage/index/sql_dialect_delegates/__init__.py,sha256=2J1pwjLJgO2wScTgQ5p7KaqGxdfKZXf4ouu6oKn5SVM,146
-buildgrid/server/cas/storage/index/sql_dialect_delegates/postgresqldelegate.py,sha256=tIXpp2wYgc3Hd5oXdZEAuhGLHrbDUe4EmKjgZPkik7Y,2316
-buildgrid/server/cas/storage/index/sql_dialect_delegates/sqlitedelegate.py,sha256=Pq7QAkT0GeHrTyDnUFd-9nJ6moFqynAEHDiGBBjjKcc,2300
+buildgrid/server/cas/storage/index/sql.py,sha256=b01u2DdalQqMWMf9_SCE6l7i2Vp4_61pmeul7Qk8bWU,41016
 buildgrid/server/cleanup/__init__.py,sha256=TTer4pMMV4HOzR6rYI9yPzDlMXEWnY_SXczlHiFZCng,579
 buildgrid/server/cleanup/cleanup.py,sha256=Yyhc9LZpvIZnQNVn-mgnMfdPBrd1x9KMsqrHwmRhzf8,13606
 buildgrid/server/cleanup/janitor/__init__.py,sha256=XaoGqSD-oQwbfSl8OcR0U4pScRcBueB4sU1tpcZNGtk,579
@@ -289,7 +286,7 @@ buildgrid/server/cleanup/janitor/types.py,sha256=R3F9OcF1OsRncmHPz6k7G2iSdOxqOw9
 buildgrid/server/cleanup/janitor/utils.py,sha256=eYxuJZFJ-7Hhlrj3B3qJvL9X3Se0GEAlkNlXf88swYU,1410
 buildgrid/server/client/__init__.py,sha256=g9lb8Sn7NY5KOjkMr9GQoJovCVDEg_Fxz_EhdDbhP1I,579
 buildgrid/server/client/actioncache.py,sha256=2Xd7HzDRzMWwyiWN1nno1glkGEXrQtjmIFzsa0SHfyQ,5021
-buildgrid/server/client/asset.py,sha256=3iEaf1ggGH6xYnU9xdprXyPpvqyVId5IJGWDX54X9tA,4454
+buildgrid/server/client/asset.py,sha256=eZdMuqwrJgPy9U7Q2Qe98P1Vzrry0jZUgxXqH18ru_A,4426
 buildgrid/server/client/auth_token_loader.py,sha256=B95uDkAsHg5p98-jZwBWZAkXEfLVWjgWEhdAC1k8fDU,1604
 buildgrid/server/client/authentication.py,sha256=6J-5PKR5AhBm_CTXWegAsHjUrmwny1wYVCEgfbMs9TM,5140
 buildgrid/server/client/capabilities.py,sha256=2fKyjy-BZK5szC_wQRmGc7bfMx2-G_zl5VZ2uHt6RA4,1687
@@ -334,18 +331,18 @@ buildgrid/server/scheduler/__init__.py,sha256=arCg8LWFATeX1tj-s0keVYP8p3wwrrUlCV
 buildgrid/server/scheduler/assigner.py,sha256=wHPAhyiQxYABZJXaUc2g5yFzM78Z0U5nvGV3X9h5pCM,10512
 buildgrid/server/scheduler/cohorts.py,sha256=L_5YZRiVOwPPGStfqnnQXknO5Ja-SC0vq0xjw4XgP-I,1426
 buildgrid/server/scheduler/events.py,sha256=cM7Z7Htr2pYKhltJxfg1YRo0q524yZaGm8yXvRehivk,1453
-buildgrid/server/scheduler/impl.py,sha256=MR3Hp0hxuYTpFu0hnriin2GcY1xgri_HAok4EaeToow,137584
-buildgrid/server/scheduler/notifier.py,sha256=uypoIXZowpAIsDQ728VCuFJ4MN2zJld1npnanCWHTrw,8854
+buildgrid/server/scheduler/impl.py,sha256=jqwf4HU6otf0XLpq0hFUC8JhOgyFKPKYd2-x23biz1M,135712
+buildgrid/server/scheduler/notifier.py,sha256=22ZsKwyf2oQirAjrwROkvgvr4C_TMUNyhOmtro4uM4I,7121
 buildgrid/server/scheduler/properties.py,sha256=2GydX8KUy9MFv1_JznIkGfWE_wOS0m_XapSv6Gp4pCM,11260
 buildgrid/server/sql/__init__.py,sha256=zbeeRP9BEeDzR-Mx2Ip6SUr49J8eeXsuREgljJTrHkk,579
-buildgrid/server/sql/models.py,sha256=Pd43qOhmtDDSV0AaHiZDeCPP09W4osiLMBLW3v_wU8w,14424
-buildgrid/server/sql/provider.py,sha256=xRF5_GDkz1u1VawcNzySLEqTpIV6FSzRsZIo8iDYEzM,22544
-buildgrid/server/sql/utils.py,sha256=yXG6wWIqKk2AP5nRpZ6AmuH8vuSQy-Zk3L_NeoRNKCg,17414
+buildgrid/server/sql/models.py,sha256=jAuNLLvdlBWf7uZWlPLMSC1dTWHYlqlk9Pn5A04rQcg,14374
+buildgrid/server/sql/provider.py,sha256=CXOpjcUY-InwmGnZTpCd4_ziOdROiDY34_SRpqsBwwk,18965
+buildgrid/server/sql/utils.py,sha256=j76Z_qtGawz6O7vO4-zgCzBV4ylhKszcd_6iY0gV4W4,16470
 buildgrid/server/sql/alembic/README,sha256=MVlc9TYmr57RbhXET6QxgyCcwWP7w-vLkEsirENqiIQ,38
 buildgrid/server/sql/alembic/env.py,sha256=vRRLEpyPS_a-jeKKYvORE0NW2goBnjN7v62n7ix_t28,4534
 buildgrid/server/sql/alembic/script.py.mako,sha256=z6re8oZ_Qk3kZBPMRWWl0b0_sb29J9V7ruHwBlODDHw,1090
 buildgrid/server/sql/alembic/versions/0596ea8f5c61_add_bot_locality_hints_table_and_.py,sha256=lFYk8PPZs2TNBDKbDlYKcDlxqt4CQUO2Jts_sPymeao,2909
-buildgrid/server/sql/alembic/versions/0c17a7cb2bc5_initial_database_state.py,sha256=l2F2549VRygDOi_xuZ4Zal0S411JToWYlSy_L3eju-s,11713
+buildgrid/server/sql/alembic/versions/0c17a7cb2bc5_initial_database_state.py,sha256=UlIiOq9d0D5pTXsdIr8k454yYvCv14uQ2XCYFzb2V3E,11433
 buildgrid/server/sql/alembic/versions/12992085e81a_add_a_job_index_on_worker_name_and_.py,sha256=Dg5kxHvR4REXmhzpd72EXriMZRCFzO9XRTPRKmWcluY,1681
 buildgrid/server/sql/alembic/versions/1f959c3834d3_drop_the_leases_table.py,sha256=V0X1XVVB_iq3nrwnVWDM7rhVrFDG3T8byZChsNwjcFA,1645
 buildgrid/server/sql/alembic/versions/22cc661efef9_add_instance_quotas_table.py,sha256=zBf23GKRMJq5scFjWC83oMHvKy7K55zOTyWYrbTuJtI,1702
@@ -368,9 +365,9 @@ buildgrid/server/utils/async_lru_cache.py,sha256=iLKeRPoZtZb1wC5AtcyQm8Wt0Bx-KZm
 buildgrid/server/utils/bots.py,sha256=c8hn7tbCecru-m2wicRmtKU5v5rSZPGlk97Yc6eUHgQ,1729
 buildgrid/server/utils/cancellation.py,sha256=pNETzKNoXg0AsXOXKCcLWlFl7SVKdkKinlqWl7MesRA,1703
 buildgrid/server/utils/digests.py,sha256=YNrWeHdbNp7OVTcsInjs30C33z_t9GQ_noMd14bpqPQ,2424
-buildgrid-0.2.53.dist-info/licenses/LICENSE,sha256=swa3Vs7GgALaG9p-e05M-WLkhd_U9QknacNkyVZ85xA,11338
-buildgrid-0.2.53.dist-info/METADATA,sha256=U9a7b52eyU6pWfE8PIMYE-Rl4TvhbC9d_Po0MEHnXG4,7040
-buildgrid-0.2.53.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-buildgrid-0.2.53.dist-info/entry_points.txt,sha256=uyFAXiR9d6EDfSA5vWT8xskz6xalt4PdTuRruT6Q8rk,49
-buildgrid-0.2.53.dist-info/top_level.txt,sha256=T6TYhI_k6NTm2871tIxGCyBIqzlKxylgF9KDLU0Hi7o,10
-buildgrid-0.2.53.dist-info/RECORD,,
+buildgrid-0.3.1.dist-info/licenses/LICENSE,sha256=swa3Vs7GgALaG9p-e05M-WLkhd_U9QknacNkyVZ85xA,11338
+buildgrid-0.3.1.dist-info/METADATA,sha256=_Tt0d76-FzhNXCN5Zm7GNYMl0GgVS3br_ok2uQABmgA,7086
+buildgrid-0.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+buildgrid-0.3.1.dist-info/entry_points.txt,sha256=uyFAXiR9d6EDfSA5vWT8xskz6xalt4PdTuRruT6Q8rk,49
+buildgrid-0.3.1.dist-info/top_level.txt,sha256=T6TYhI_k6NTm2871tIxGCyBIqzlKxylgF9KDLU0Hi7o,10
+buildgrid-0.3.1.dist-info/RECORD,,
@@ -1,4 +0,0 @@
1
- from .postgresqldelegate import PostgreSQLDelegate
2
- from .sqlitedelegate import SQLiteDelegate
3
-
4
- __all__ = ["PostgreSQLDelegate", "SQLiteDelegate"]
@@ -1,65 +0,0 @@
1
- # Copyright (C) 2020 Bloomberg LP
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # <http://www.apache.org/licenses/LICENSE-2.0>
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- """
16
- PostgreSQLDelegate
17
- ==================
18
-
19
- Extra functionality for the SQL index when using a PostgreSQL backend.
20
-
21
- """
22
-
23
- from datetime import datetime
24
- from typing import cast
25
-
26
- from sqlalchemy import Table
27
- from sqlalchemy.dialects.postgresql import insert
28
- from sqlalchemy.orm.session import Session as SessionType
29
- from sqlalchemy.sql.functions import coalesce
30
-
31
- from buildgrid._protos.build.bazel.remote.execution.v2.remote_execution_pb2 import Digest
32
- from buildgrid.server.sql.models import IndexEntry
33
-
34
-
35
- class PostgreSQLDelegate:
36
- @staticmethod
37
- def _save_digests_to_index(
38
- digest_blob_pairs: list[tuple[Digest, bytes | None]], session: SessionType, max_inline_blob_size: int
39
- ) -> None:
40
- # See discussion of __table__ typing in https://github.com/sqlalchemy/sqlalchemy/issues/9130
41
- index_table = cast(Table, IndexEntry.__table__)
42
- update_time = datetime.utcnow()
43
- new_rows = [
44
- {
45
- "digest_hash": digest.hash,
46
- "digest_size_bytes": digest.size_bytes,
47
- "accessed_timestamp": update_time,
48
- "inline_blob": (blob if digest.size_bytes <= max_inline_blob_size else None),
49
- "deleted": False,
50
- }
51
- for (digest, blob) in digest_blob_pairs
52
- ]
53
-
54
- base_insert_stmt = insert(index_table).values(new_rows)
55
-
56
- update_stmt = base_insert_stmt.on_conflict_do_update(
57
- index_elements=["digest_hash"],
58
- set_={
59
- "accessed_timestamp": update_time,
60
- "inline_blob": coalesce(base_insert_stmt.excluded.inline_blob, index_table.c.inline_blob),
61
- "deleted": False,
62
- },
63
- )
64
-
65
- session.execute(update_stmt)
@@ -1,65 +0,0 @@
1
- # Copyright (C) 2020 Bloomberg LP
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # <http://www.apache.org/licenses/LICENSE-2.0>
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- """
16
- SQLiteDelegate
17
- ==================
18
-
19
- Extra functionality for the SQL index when using a SQLite backend.
20
-
21
- """
22
-
23
- from datetime import datetime
24
- from typing import cast
25
-
26
- from sqlalchemy import Table
27
- from sqlalchemy.dialects.sqlite import insert
28
- from sqlalchemy.orm.session import Session as SessionType
29
- from sqlalchemy.sql.functions import coalesce
30
-
31
- from buildgrid._protos.build.bazel.remote.execution.v2.remote_execution_pb2 import Digest
32
- from buildgrid.server.sql.models import IndexEntry
33
-
34
-
35
- class SQLiteDelegate:
36
- @staticmethod
37
- def _save_digests_to_index(
38
- digest_blob_pairs: list[tuple[Digest, bytes | None]], session: SessionType, max_inline_blob_size: int
39
- ) -> None:
40
- # See discussion of __table__ typing in https://github.com/sqlalchemy/sqlalchemy/issues/9130
41
- index_table = cast(Table, IndexEntry.__table__)
42
- update_time = datetime.utcnow()
43
- new_rows = [
44
- {
45
- "digest_hash": digest.hash,
46
- "digest_size_bytes": digest.size_bytes,
47
- "accessed_timestamp": update_time,
48
- "inline_blob": (blob if digest.size_bytes <= max_inline_blob_size else None),
49
- "deleted": False,
50
- }
51
- for (digest, blob) in digest_blob_pairs
52
- ]
53
-
54
- base_insert_stmt = insert(index_table).values(new_rows)
55
-
56
- update_stmt = base_insert_stmt.on_conflict_do_update(
57
- index_elements=["digest_hash"],
58
- set_={
59
- "accessed_timestamp": update_time,
60
- "inline_blob": coalesce(base_insert_stmt.excluded.inline_blob, index_table.c.inline_blob),
61
- "deleted": False,
62
- },
63
- )
64
-
65
- session.execute(update_stmt)