nucliadb 6.3.1.post3546__py3-none-any.whl → 6.3.1.post3557__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
migrations/0020_drain_nodes_from_cluster.py CHANGED
@@ -28,9 +28,6 @@ create new shards in the remaining nodes.
 
 import logging
 
-from nucliadb.common import datamanagers
-from nucliadb.common.cluster.rollover import rollover_kb_index
-from nucliadb.common.cluster.settings import settings as cluster_settings
 from nucliadb.migrator.context import ExecutionContext
 
 logger = logging.getLogger(__name__)
@@ -39,45 +36,4 @@ logger = logging.getLogger(__name__)
 async def migrate(context: ExecutionContext) -> None: ...
 
 
-async def migrate_kb(context: ExecutionContext, kbid: str) -> None:
-    """
-    Rollover KB shards if any of the shards are on the nodes to drain
-    """
-    drain_node_ids = cluster_settings.drain_nodes
-    if len(drain_node_ids) == 0:
-        logger.info("Skipping migration because no drain_nodes are set")
-        return
-
-    if not await kb_has_shards_on_drain_nodes(kbid, drain_node_ids):
-        logger.info(
-            "KB does not have shards on the nodes to drain, skipping rollover",
-            extra={"kbid": kbid},
-        )
-        return
-
-    logger.info("Rolling over affected KB", extra={"kbid": kbid})
-    await rollover_kb_index(context, kbid, drain_nodes=drain_node_ids)
-
-
-async def kb_has_shards_on_drain_nodes(kbid: str, drain_node_ids: list[str]) -> bool:
-    async with datamanagers.with_ro_transaction() as txn:
-        shards = await datamanagers.cluster.get_kb_shards(txn, kbid=kbid)
-        if not shards:
-            logger.warning("Shards object not found", extra={"kbid": kbid})
-            return False
-        shard_in_drain_nodes = False
-        for shard in shards.shards:
-            for replica in shard.replicas:
-                if replica.node in drain_node_ids:
-                    logger.info(
-                        "Shard found in drain nodes, will rollover it",
-                        extra={
-                            "kbid": kbid,
-                            "logical_shard": shard.shard,
-                            "replica_shard_id": replica.shard.id,
-                            "node": replica.node,
-                            "drain_node_ids": drain_node_ids,
-                        },
-                    )
-                    shard_in_drain_nodes = True
-        return shard_in_drain_nodes
+async def migrate_kb(context: ExecutionContext, kbid: str) -> None: ...
nucliadb/backups/const.py CHANGED
@@ -19,6 +19,9 @@
 #
 
 
+from nucliadb.tasks.utils import NatsConsumer, NatsStream
+
+
 class MaindbKeys:
     METADATA = "kbs/{kbid}/backups/{backup_id}"
     LAST_RESTORED = "kbs/{kbid}/backup/{backup_id}/last_restored"
@@ -41,9 +44,8 @@ class BackupFinishedStream:
     subject = "backups.creation_finished"
 
 
-class BackupsNatsStream:
-    name = "ndb-backups"
-    stream_subjects = ["ndb-backups.>"]
-    create_subject = "ndb-backups.create"
-    delete_subject = "ndb-backups.delete"
-    restore_subject = "ndb-backups.restore"
+class BackupsNatsConfig:
+    stream = NatsStream(name="ndb-backups", subjects=["ndb-backups.>"])
+    create_consumer = NatsConsumer(subject="ndb-backups.create", group="ndb-backups-create")
+    delete_consumer = NatsConsumer(subject="ndb-backups.delete", group="ndb-backups-delete")
+    restore_consumer = NatsConsumer(subject="ndb-backups.restore", group="ndb-backups-restore")
nucliadb/backups/tasks.py CHANGED
@@ -19,7 +19,7 @@
 #
 from typing import Awaitable, Callable
 
-from nucliadb.backups.const import BackupsNatsStream
+from nucliadb.backups.const import BackupsNatsConfig
 from nucliadb.backups.create import backup_kb_task
 from nucliadb.backups.delete import delete_backup
 from nucliadb.backups.models import CreateBackupRequest, DeleteBackupRequest, RestoreBackupRequest
@@ -33,9 +33,8 @@ from nucliadb.tasks.producer import NatsTaskProducer
 def creator_consumer() -> NatsTaskConsumer[CreateBackupRequest]:
     consumer: NatsTaskConsumer = create_consumer(
         name="backup_creator",
-        stream=BackupsNatsStream.name,
-        stream_subjects=BackupsNatsStream.stream_subjects,
-        consumer_subject=BackupsNatsStream.create_subject,
+        stream=BackupsNatsConfig.stream,
+        consumer=BackupsNatsConfig.create_consumer,
         callback=backup_kb_task,
         msg_type=CreateBackupRequest,
         max_concurrent_messages=10,
@@ -46,9 +45,8 @@ def creator_consumer() -> NatsTaskConsumer[CreateBackupRequest]:
 async def create(kbid: str, backup_id: str) -> None:
     producer: NatsTaskProducer[CreateBackupRequest] = create_producer(
         name="backup_creator",
-        stream=BackupsNatsStream.name,
-        stream_subjects=BackupsNatsStream.stream_subjects,
-        producer_subject=BackupsNatsStream.create_subject,
+        stream=BackupsNatsConfig.stream,
+        producer_subject=BackupsNatsConfig.create_consumer.subject,
         msg_type=CreateBackupRequest,
     )
     msg = CreateBackupRequest(
@@ -61,9 +59,8 @@ async def create(kbid: str, backup_id: str) -> None:
 def restorer_consumer() -> NatsTaskConsumer[RestoreBackupRequest]:
     consumer: NatsTaskConsumer = create_consumer(
         name="backup_restorer",
-        stream=BackupsNatsStream.name,
-        stream_subjects=BackupsNatsStream.stream_subjects,
-        consumer_subject=BackupsNatsStream.restore_subject,
+        stream=BackupsNatsConfig.stream,
+        consumer=BackupsNatsConfig.restore_consumer,
         callback=restore_kb_task,
         msg_type=RestoreBackupRequest,
         max_concurrent_messages=10,
@@ -74,9 +71,8 @@ def restorer_consumer() -> NatsTaskConsumer[RestoreBackupRequest]:
 async def restore(kbid: str, backup_id: str) -> None:
     producer: NatsTaskProducer[RestoreBackupRequest] = create_producer(
         name="backup_restorer",
-        stream=BackupsNatsStream.name,
-        stream_subjects=BackupsNatsStream.stream_subjects,
-        producer_subject=BackupsNatsStream.restore_subject,
+        stream=BackupsNatsConfig.stream,
+        producer_subject=BackupsNatsConfig.restore_consumer.subject,
         msg_type=RestoreBackupRequest,
     )
     msg = RestoreBackupRequest(
@@ -89,9 +85,8 @@ async def restore(kbid: str, backup_id: str) -> None:
 def deleter_consumer() -> NatsTaskConsumer[DeleteBackupRequest]:
     consumer: NatsTaskConsumer = create_consumer(
         name="backup_deleter",
-        stream=BackupsNatsStream.name,
-        stream_subjects=BackupsNatsStream.stream_subjects,
-        consumer_subject=BackupsNatsStream.delete_subject,
+        stream=BackupsNatsConfig.stream,
+        consumer=BackupsNatsConfig.delete_consumer,
         callback=delete_backup,
         msg_type=DeleteBackupRequest,
         max_concurrent_messages=2,
@@ -102,9 +97,8 @@ def deleter_consumer() -> NatsTaskConsumer[DeleteBackupRequest]:
 async def delete(backup_id: str) -> None:
     producer: NatsTaskProducer[DeleteBackupRequest] = create_producer(
         name="backup_deleter",
-        stream=BackupsNatsStream.name,
-        stream_subjects=BackupsNatsStream.stream_subjects,
-        producer_subject=BackupsNatsStream.delete_subject,
+        stream=BackupsNatsConfig.stream,
+        producer_subject=BackupsNatsConfig.delete_consumer.subject,
         msg_type=DeleteBackupRequest,
     )
     msg = DeleteBackupRequest(
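
For orientation, a minimal sketch (not part of the package diff) of the call shape these backups hunks converge on; every name is taken from the hunks above, while the surrounding application wiring is assumed:

    from nucliadb.backups.const import BackupsNatsConfig
    from nucliadb.backups.create import backup_kb_task
    from nucliadb.backups.models import CreateBackupRequest
    from nucliadb.tasks import create_consumer, create_producer

    # Consumers now receive a NatsStream plus a NatsConsumer instead of three
    # loose strings (stream name, stream subjects, consumer subject).
    consumer = create_consumer(
        name="backup_creator",
        stream=BackupsNatsConfig.stream,
        consumer=BackupsNatsConfig.create_consumer,
        callback=backup_kb_task,
        msg_type=CreateBackupRequest,
        max_concurrent_messages=10,
    )

    # Producers keep publishing to a plain subject, taken from the matching consumer.
    producer = create_producer(
        name="backup_creator",
        stream=BackupsNatsConfig.stream,
        producer_subject=BackupsNatsConfig.create_consumer.subject,
        msg_type=CreateBackupRequest,
    )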
nucliadb/common/cluster/settings.py CHANGED
@@ -24,12 +24,6 @@ from pydantic import Field
 from pydantic_settings import BaseSettings
 
 
-class ClusterDiscoveryMode(str, enum.Enum):
-    MANUAL = "manual"
-    KUBERNETES = "kubernetes"
-    SINGLE_NODE = "single_node"
-
-
 class StandaloneNodeRole(enum.Enum):
     ALL = "all"
     INDEX = "index"
@@ -39,53 +33,20 @@ class StandaloneNodeRole(enum.Enum):
 class Settings(BaseSettings):
     data_path: str = "./data/node"
     standalone_mode: bool = False
-    standalone_node_port: int = Field(
-        default=10009,
-        title="Standalone node port",
-        description="Port to use for standalone nodes to communication with each other through",
-    )
     standalone_node_role: StandaloneNodeRole = StandaloneNodeRole.ALL
 
-    node_replicas: int = 2
-
-    node_writer_port: int = 10000
-    node_reader_port: int = 10001
-
-    # Only for testing purposes
-    writer_port_map: dict[str, int] = {}
-    reader_port_map: dict[str, int] = {}
-
-    # Node limits
+    # Index limits
     max_shard_paragraphs: int = Field(
         default=500_000,
         title="Max shard paragraphs",
         description="Maximum number of paragraphs to target per shard",
     )
-    max_node_replicas: int = Field(
-        default=800,
-        title="Max node replicas",
-        description="Maximum number of shard replicas a single node will manage",
-    )
     max_resource_paragraphs: int = Field(
         default=50_000,
         title="Max paragraphs per resource",
        description="Maximum number of paragraphs allowed on a single resource",
    )
 
-    drain_nodes: list[str] = Field(
-        default=[],
-        title="Drain nodes",
-        description="List of node IDs to ignore when creating new shards. It is used for draining nodes from a cluster. Example: ['1bf3bfe7-e164-4a19-a4d9-41372fc15aca',]",  # noqa: E501
-    )
-
-    local_reader_threads: int = 5
-    local_writer_threads: int = 5
-
-    cluster_discovery_mode: ClusterDiscoveryMode = ClusterDiscoveryMode.KUBERNETES
-    cluster_discovery_kubernetes_namespace: str = "nucliadb"
-    cluster_discovery_kubernetes_selector: str = "appType=node"
-    cluster_discovery_manual_addresses: list[str] = []
-
     nidx_api_address: Optional[str] = Field(default=None, description="NIDX gRPC API address")
     nidx_searcher_address: Optional[str] = Field(
         default=None, description="NIDX gRPC searcher API address"
nucliadb/export_import/tasks.py CHANGED
@@ -24,15 +24,36 @@ from nucliadb.export_import.models import NatsTaskMessage
 from nucliadb.tasks import create_consumer, create_producer
 from nucliadb.tasks.consumer import NatsTaskConsumer
 from nucliadb.tasks.producer import NatsTaskProducer
-from nucliadb_utils import const
+from nucliadb.tasks.utils import NatsConsumer, NatsStream
+
+
+class ExportsNatsConfig:
+    stream = NatsStream(
+        name="ndb-exports",
+        subjects=["ndb-exports"],
+    )
+    consumer = NatsConsumer(
+        subject="ndb-exports",
+        group="ndb-exports",
+    )
+
+
+class ImportsNatsConfig:
+    stream = NatsStream(
+        name="ndb-imports",
+        subjects=["ndb-imports"],
+    )
+    consumer = NatsConsumer(
+        subject="ndb-imports",
+        group="ndb-imports",
+    )
 
 
 def get_exports_consumer() -> NatsTaskConsumer[NatsTaskMessage]:
     return create_consumer(
         name="exports_consumer",
-        stream=const.Streams.KB_EXPORTS.name,
-        stream_subjects=[const.Streams.KB_EXPORTS.subject],
-        consumer_subject=const.Streams.KB_EXPORTS.subject,
+        stream=ExportsNatsConfig.stream,
+        consumer=ExportsNatsConfig.consumer,
         callback=export_kb_to_blob_storage,
         msg_type=NatsTaskMessage,
         max_concurrent_messages=10,
@@ -42,9 +63,8 @@ def get_exports_consumer() -> NatsTaskConsumer[NatsTaskMessage]:
 async def get_exports_producer(context: ApplicationContext) -> NatsTaskProducer[NatsTaskMessage]:
     producer = create_producer(
         name="exports_producer",
-        stream=const.Streams.KB_EXPORTS.name,
-        stream_subjects=[const.Streams.KB_EXPORTS.subject],
-        producer_subject=const.Streams.KB_EXPORTS.subject,
+        stream=ExportsNatsConfig.stream,
+        producer_subject=ExportsNatsConfig.consumer.subject,
         msg_type=NatsTaskMessage,
     )
     await producer.initialize(context)
@@ -54,9 +74,8 @@ async def get_exports_producer(context: ApplicationContext) -> NatsTaskProducer[
 def get_imports_consumer() -> NatsTaskConsumer[NatsTaskMessage]:
     return create_consumer(
         name="imports_consumer",
-        stream=const.Streams.KB_IMPORTS.name,
-        stream_subjects=[const.Streams.KB_IMPORTS.subject],
-        consumer_subject=const.Streams.KB_IMPORTS.subject,
+        stream=ImportsNatsConfig.stream,
+        consumer=ImportsNatsConfig.consumer,
         callback=import_kb_from_blob_storage,
         msg_type=NatsTaskMessage,
         max_concurrent_messages=10,
@@ -66,9 +85,8 @@ def get_imports_consumer() -> NatsTaskConsumer[NatsTaskMessage]:
 async def get_imports_producer(context: ApplicationContext) -> NatsTaskProducer[NatsTaskMessage]:
     producer = create_producer(
         name="imports_producer",
-        stream=const.Streams.KB_IMPORTS.name,
-        stream_subjects=[const.Streams.KB_IMPORTS.subject],
-        producer_subject=const.Streams.KB_IMPORTS.subject,
+        stream=ImportsNatsConfig.stream,
+        producer_subject=ImportsNatsConfig.consumer.subject,
         msg_type=NatsTaskMessage,
     )
     await producer.initialize(context)
nucliadb/standalone/config.py CHANGED
@@ -22,7 +22,7 @@ import logging
 import os
 
 from nucliadb.common.cluster.settings import StandaloneNodeRole
-from nucliadb.standalone.settings import Settings, StandaloneDiscoveryMode
+from nucliadb.standalone.settings import Settings
 
 logger = logging.getLogger(__name__)
 
@@ -76,7 +76,6 @@ def config_nucliadb(nucliadb_args: Settings):
     use some specific settings.
     """
 
-    from nucliadb.common.cluster.settings import ClusterDiscoveryMode
     from nucliadb.common.cluster.settings import settings as cluster_settings
     from nucliadb.ingest.settings import settings as ingest_settings
     from nucliadb.train.settings import settings as train_settings
@@ -91,14 +90,8 @@ def config_nucliadb(nucliadb_args: Settings):
 
     cluster_settings.standalone_mode = True
     cluster_settings.data_path = nucliadb_args.data_path
-    cluster_settings.standalone_node_port = nucliadb_args.standalone_node_port
     cluster_settings.standalone_node_role = nucliadb_args.standalone_node_role
 
-    if nucliadb_args.cluster_discovery_mode == StandaloneDiscoveryMode.DEFAULT:
-        # default for standalone is single node
-        cluster_settings.cluster_discovery_mode = ClusterDiscoveryMode.SINGLE_NODE
-        cluster_settings.node_replicas = 1
-
     ingest_settings.nuclia_partitions = 1
     ingest_settings.replica_number = 0
     ingest_settings.partitions = ["1"]
nucliadb/standalone/run.py CHANGED
@@ -99,10 +99,7 @@ def run():
         "Admin UI": f"http://{settings.http_host}:{settings.http_port}/admin",
         "Key-value backend": ingest_settings.driver.value,
         "Blob storage backend": storage_settings.file_backend.value,
-        "Cluster discovery mode": cluster_settings.cluster_discovery_mode.value,
-        "Node replicas": cluster_settings.node_replicas,
         "Index data path": os.path.realpath(cluster_settings.data_path),
-        "Node port": cluster_settings.standalone_node_port,
         "Auth policy": settings.auth_policy.value,
         "Node role": cluster_settings.standalone_node_role.value,
     }
nucliadb/standalone/settings.py CHANGED
@@ -30,13 +30,6 @@ from nucliadb_utils.settings import StorageSettings
 from nucliadb_utils.storages.settings import Settings as ExtendedStorageSettings
 
 
-class StandaloneDiscoveryMode(Enum):
-    DEFAULT = "default"
-    MANUAL = "manual"
-    KUBERNETES = "kubernetes"
-    SINGLE_NODE = "single_node"
-
-
 class AuthPolicy(Enum):
     UPSTREAM_NAIVE = "upstream_naive"
     UPSTREAM_AUTH_HEADER = "upstream_auth_header"
@@ -60,8 +53,6 @@ class Settings(DriverSettings, StorageSettings, ExtendedStorageSettings):
     http_port: int = pydantic.Field(default=8080, description="HTTP Port")
     ingest_grpc_port: int = pydantic.Field(default=8030, description="Ingest GRPC Port")
     train_grpc_port: int = pydantic.Field(default=8031, description="Train GRPC Port")
-    standalone_node_port: int = pydantic.Field(default=10009, description="Node GRPC Port")
-
     auth_policy: AuthPolicy = pydantic.Field(
         default=AuthPolicy.UPSTREAM_NAIVE,
         description="""Auth policy to use for http requests.
@@ -111,8 +102,6 @@ Examples:
         description="JWK key used for temporary token generation and validation.",
     )
 
-    cluster_discovery_mode: StandaloneDiscoveryMode = StandaloneDiscoveryMode.DEFAULT
-
     fork: bool = pydantic.Field(default=False, description="Fork process on startup")
 
     # Standalone logging settings
nucliadb/tasks/consumer.py CHANGED
@@ -28,7 +28,7 @@ from nats.aio.client import Msg
 from nucliadb.common.context import ApplicationContext
 from nucliadb.tasks.logger import logger
 from nucliadb.tasks.models import Callback, MsgType
-from nucliadb.tasks.utils import create_nats_stream_if_not_exists
+from nucliadb.tasks.utils import NatsConsumer, NatsStream, create_nats_stream_if_not_exists
 from nucliadb_telemetry import errors
 from nucliadb_utils.nats import MessageProgressUpdater
 from nucliadb_utils.settings import nats_consumer_settings
@@ -40,17 +40,15 @@ class NatsTaskConsumer(Generic[MsgType]):
     def __init__(
         self,
         name: str,
-        stream: str,
-        stream_subjects: list[str],
-        consumer_subject: str,
+        stream: NatsStream,
+        consumer: NatsConsumer,
         callback: Callback,
         msg_type: Type[MsgType],
         max_concurrent_messages: Optional[int] = None,
     ):
         self.name = name
         self.stream = stream
-        self.stream_subjects = stream_subjects
-        self.consumer_subject = consumer_subject
+        self.consumer = consumer
         self.callback = callback
         self.msg_type = msg_type
         self.max_concurrent_messages = max_concurrent_messages
@@ -61,7 +59,7 @@ class NatsTaskConsumer(Generic[MsgType]):
     async def initialize(self, context: ApplicationContext):
         self.context = context
         await create_nats_stream_if_not_exists(
-            context, stream_name=self.stream, subjects=self.stream_subjects
+            context, stream_name=self.stream.name, subjects=self.stream.subjects
         )
         await self._setup_nats_subscription()
         self.initialized = True
@@ -80,17 +78,15 @@ class NatsTaskConsumer(Generic[MsgType]):
 
     async def _setup_nats_subscription(self):
         # Nats push consumer
-        stream = self.stream
-        subject = group = self.consumer_subject
         max_ack_pending = (
             self.max_concurrent_messages
             if self.max_concurrent_messages
             else nats_consumer_settings.nats_max_ack_pending
         )
         self.subscription = await self.context.nats_manager.subscribe(
-            subject=subject,
-            queue=group,
-            stream=stream,
+            subject=self.consumer.subject,
+            queue=self.consumer.group,
+            stream=self.stream.name,
             cb=self._subscription_worker_as_task,
             subscription_lost_cb=self._setup_nats_subscription,
             manual_ack=True,
@@ -103,7 +99,7 @@ class NatsTaskConsumer(Generic[MsgType]):
             ),
         )
         logger.info(
-            f"Subscribed to {subject} on stream {stream}",
+            f"Subscribed {self.consumer.group} to {self.consumer.subject} on stream {self.stream.name}",
             extra={"consumer_name": self.name},
         )
 
@@ -178,9 +174,8 @@ class NatsTaskConsumer(Generic[MsgType]):
 
 def create_consumer(
     name: str,
-    stream: str,
-    stream_subjects: list[str],
-    consumer_subject: str,
+    stream: NatsStream,
+    consumer: NatsConsumer,
     callback: Callback,
     msg_type: Type[MsgType],
     max_concurrent_messages: Optional[int] = None,
@@ -188,13 +183,11 @@ def create_consumer(
     """
     Returns a non-initialized consumer
     """
-    consumer = NatsTaskConsumer(
+    return NatsTaskConsumer(
         name=name,
         stream=stream,
-        stream_subjects=stream_subjects,
-        consumer_subject=consumer_subject,
+        consumer=consumer,
         callback=callback,
         msg_type=msg_type,
         max_concurrent_messages=max_concurrent_messages,
     )
-    return consumer
nucliadb/tasks/producer.py CHANGED
@@ -22,7 +22,7 @@ from typing import Generic, Optional, Type
 from nucliadb.common.context import ApplicationContext
 from nucliadb.tasks.logger import logger
 from nucliadb.tasks.models import MsgType
-from nucliadb.tasks.utils import create_nats_stream_if_not_exists
+from nucliadb.tasks.utils import NatsStream, create_nats_stream_if_not_exists
 from nucliadb_telemetry import errors
 
 
@@ -30,14 +30,12 @@ class NatsTaskProducer(Generic[MsgType]):
     def __init__(
         self,
         name: str,
-        stream: str,
-        stream_subjects: list[str],
+        stream: NatsStream,
         producer_subject: str,
         msg_type: Type[MsgType],
     ):
         self.name = name
         self.stream = stream
-        self.stream_subjects = stream_subjects
         self.producer_subject = producer_subject
         self.msg_type = msg_type
         self.context: Optional[ApplicationContext] = None
@@ -47,8 +45,8 @@ class NatsTaskProducer(Generic[MsgType]):
         self.context = context
         await create_nats_stream_if_not_exists(
             self.context,
-            self.stream,
-            subjects=self.stream_subjects,
+            self.stream.name,
+            subjects=self.stream.subjects,
         )
         self.initialized = True
 
@@ -81,8 +79,7 @@ class NatsTaskProducer(Generic[MsgType]):
 
 def create_producer(
     name: str,
-    stream: str,
-    stream_subjects: list[str],
+    stream: NatsStream,
     producer_subject: str,
     msg_type: Type[MsgType],
 ) -> NatsTaskProducer[MsgType]:
@@ -92,7 +89,6 @@ def create_producer(
     producer = NatsTaskProducer[MsgType](
         name=name,
         stream=stream,
-        stream_subjects=stream_subjects,
         producer_subject=producer_subject,
         msg_type=msg_type,
     )
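
As a quick check of the new producer signature, a hedged sketch reusing the export task types shown earlier in this diff; `context` is assumed to be an already-initialized ApplicationContext:

    from nucliadb.export_import.models import NatsTaskMessage
    from nucliadb.tasks.producer import create_producer
    from nucliadb.tasks.utils import NatsConsumer, NatsStream

    stream = NatsStream(name="ndb-exports", subjects=["ndb-exports"])
    consumer = NatsConsumer(subject="ndb-exports", group="ndb-exports")

    # The producer only needs the stream and the subject it publishes to;
    # the consumer group matters only on the subscribing side.
    producer = create_producer(
        name="exports_producer",
        stream=stream,
        producer_subject=consumer.subject,
        msg_type=NatsTaskMessage,
    )
    # Before use, the producer must be initialized with the application
    # context, which also creates the JetStream stream if it is missing:
    # await producer.initialize(context)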
nucliadb/tasks/utils.py CHANGED
@@ -18,6 +18,8 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
 
+from dataclasses import dataclass
+
 import nats
 
 from nucliadb.common.context import ApplicationContext
@@ -31,3 +33,19 @@ async def create_nats_stream_if_not_exists(
         await js.stream_info(stream_name)
     except nats.js.errors.NotFoundError:
         await js.add_stream(name=stream_name, subjects=subjects)
+
+
+@dataclass
+class NatsStream:
+    name: str
+    subjects: list[str]
+
+
+@dataclass
+class NatsConsumer:
+    """
+    NOTE: groups can't contain '.', '*' or '>' characters.
+    """
+
+    subject: str
+    group: str
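
The create_nats_stream_if_not_exists helper extended above follows a common JetStream pattern; a self-contained sketch of the same pattern against a local NATS server (the server address is an assumption for the example) looks roughly like this:

    import asyncio

    import nats
    import nats.js.errors

    async def ensure_stream() -> None:
        # Connect to a local NATS server; the address is assumed for the example.
        nc = await nats.connect("nats://localhost:4222")
        js = nc.jetstream()
        try:
            # stream_info raises NotFoundError if the stream was never created.
            await js.stream_info("ndb-backups")
        except nats.js.errors.NotFoundError:
            await js.add_stream(name="ndb-backups", subjects=["ndb-backups.>"])
        await nc.close()

    asyncio.run(ensure_stream())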
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: nucliadb
-Version: 6.3.1.post3546
+Version: 6.3.1.post3557
 Summary: NucliaDB
 Author-email: Nuclia <nucliadb@nuclia.com>
 License: AGPL
@@ -20,11 +20,11 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3 :: Only
 Requires-Python: <4,>=3.9
 Description-Content-Type: text/markdown
-Requires-Dist: nucliadb-telemetry[all]>=6.3.1.post3546
-Requires-Dist: nucliadb-utils[cache,fastapi,storages]>=6.3.1.post3546
-Requires-Dist: nucliadb-protos>=6.3.1.post3546
-Requires-Dist: nucliadb-models>=6.3.1.post3546
-Requires-Dist: nidx-protos>=6.3.1.post3546
+Requires-Dist: nucliadb-telemetry[all]>=6.3.1.post3557
+Requires-Dist: nucliadb-utils[cache,fastapi,storages]>=6.3.1.post3557
+Requires-Dist: nucliadb-protos>=6.3.1.post3557
+Requires-Dist: nucliadb-models>=6.3.1.post3557
+Requires-Dist: nidx-protos>=6.3.1.post3557
 Requires-Dist: nucliadb-admin-assets>=1.0.0.post1224
 Requires-Dist: nuclia-models>=0.24.2
 Requires-Dist: uvicorn
@@ -16,7 +16,7 @@ migrations/0016_upgrade_to_paragraphs_v2.py,sha256=9eepvzme-nb_mw6rwIdjjJzbFcpOq
 migrations/0017_multiple_writable_shards.py,sha256=HSi-eXXI0kO9sLgunUMuVpFnYMjVhWTVfDS_lIjlkuM,2095
 migrations/0018_purge_orphan_kbslugs.py,sha256=ztEOAjqlWVagv1UMg_sOm8HaW6S9FoDOIg7-rZ05tro,2177
 migrations/0019_upgrade_to_paragraphs_v3.py,sha256=zP13_IKE7u4ox2gyc493L2_ewyiPlr7Csn5K6n5eylI,2479
-migrations/0020_drain_nodes_from_cluster.py,sha256=cgm_72kH57QiBbPx17Judn7Wp5hQnKn6UW_1Z37_8s8,3269
+migrations/0020_drain_nodes_from_cluster.py,sha256=BeECAI0T8u14M2U5USl1fFNcsfmdMerNhisolYQN_eA,1411
 migrations/0021_overwrite_vectorsets_key.py,sha256=O6nb2a7kDFX9I3XFrVtudCUyKptpGyv2_GYvcvbQOI8,1583
 migrations/0022_fix_paragraph_deletion_bug.py,sha256=-tH342VXF-8xwc_h3P1cYaUtTT1wHSGf7ZoeVEpnaYs,1422
 migrations/0023_backfill_pg_catalog.py,sha256=gw22pU5cAtg2a7n7xVaVqT2itjAoDMNtzWwTllwqIvg,2993
@@ -40,13 +40,13 @@ nucliadb/metrics_exporter.py,sha256=6u0geEYFxgE5I2Fhl_sxsvGN-ZkaFZNGutSXwrzrsVs,
 nucliadb/openapi.py,sha256=wDiw0dVEvTpJvbatkJ0JZLkKm9RItZT5PWRHjqRfqTA,2272
 nucliadb/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nucliadb/backups/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
-nucliadb/backups/const.py,sha256=llb5TaC53Ce6BMmlPKPUrVhVGl7uQrqv_Vle-P4GET4,1673
+nucliadb/backups/const.py,sha256=9vPAhLxQO_gNAjSdPxWuv3V66s9WcdpjOQ89CZlfmuk,1894
 nucliadb/backups/create.py,sha256=AM_nC7TgHOX0EFGaTXClS28jBSK28fHrKNZi14z2wek,10442
 nucliadb/backups/delete.py,sha256=1rnBhVUGYYZJXSZUrrgYMDZ5NyswEWkIA-G-crRCyHk,2404
 nucliadb/backups/models.py,sha256=-hITU4Mv6AxePu12toBu_fjpEv6vVGcwNVxV22O9jQA,1273
 nucliadb/backups/restore.py,sha256=xhslVvTf4H8VmDucZpjrEFpKj6csPIWBadCPMVJYKQ8,9703
 nucliadb/backups/settings.py,sha256=SyzsInj1BRbBI0atg5IXWbMbOZ_eVg4eSQ3IcnUhCxQ,1357
-nucliadb/backups/tasks.py,sha256=QgGDBBWsTpD-jLcrRP-C897Zo1qvc_ux1b4L-SlCnd8,4530
+nucliadb/backups/tasks.py,sha256=4_kOVJ2yCwMvDEpzJgTuTt75TNlpq5woyw9sTAcaSkw,4194
 nucliadb/backups/utils.py,sha256=ayDaxfWP5cPnAkQH-tF4M6cnowsPQgU2ljYz_iL1CbE,1249
 nucliadb/common/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
 nucliadb/common/constants.py,sha256=QpigxJh_CtD85Evy0PtV5cVq6x0U_f9xfIcXz1ymkUg,869
@@ -58,11 +58,10 @@ nucliadb/common/cluster/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIX
 nucliadb/common/cluster/base.py,sha256=kklDqyvsubNX0W494ttl9f3E58lGaX6AXqAd8XX8ZHE,5522
 nucliadb/common/cluster/exceptions.py,sha256=t7v_l93t44l2tQpdQXgO_w-c4YZRcaayOz1A2i0w4RQ,1258
 nucliadb/common/cluster/grpc_node_dummy.py,sha256=LxONv0mhDFhx7mI91qqGfQlQ-R0qOGDYaxhXoBHLXaE,3548
-nucliadb/common/cluster/index_node.py,sha256=g38H1kiAliF3Y6et_CWYInpn_xPxf7THAFJ7RtgLNZo,3246
 nucliadb/common/cluster/manager.py,sha256=KIzqAYGgdVK3GicJ9LdLoei8arWZ7H60imbc32USPj4,12754
 nucliadb/common/cluster/rebalance.py,sha256=cLUlR08SsqmnoA_9GDflV6k2tXmkAPpyFxZErzp45vo,8754
 nucliadb/common/cluster/rollover.py,sha256=iTJ9EQmHbzXL34foNFto-hqdC0Kq1pF1mNxqv0jqhBs,25362
-nucliadb/common/cluster/settings.py,sha256=TMoym-cZsQ2soWfLAce0moSa2XncttQyhahL43LrWTo,3384
+nucliadb/common/cluster/settings.py,sha256=JPwV_0U_i618Tn66GWUq6qCKNjy4TWkGEGld9GwH5uk,2048
 nucliadb/common/cluster/utils.py,sha256=7nQvnVFxM4XV7J560R8hUA-GPzrgD19UlQxHrl4mZUc,4687
 nucliadb/common/cluster/standalone/__init__.py,sha256=itSI7dtTwFP55YMX4iK7JzdMHS5CQVUiB1XzQu4UBh8,833
 nucliadb/common/cluster/standalone/utils.py,sha256=af3r-x_GF7A6dwIAhZLR-r-SZQEVxsFrDKeMfUTA6G0,1908
@@ -109,11 +108,10 @@ nucliadb/export_import/exceptions.py,sha256=Dw8WqfG4r6MPJc5TPfbjMvCgXXWTcTOecGHR
 nucliadb/export_import/exporter.py,sha256=k2QVx1EjqFlDYiggriWiEJzwtMXzHbldsqWdpGQM3_U,7074
 nucliadb/export_import/importer.py,sha256=v5cq9Nn8c2zrY_K_00mydR52f8mdFxR7tLdtNLQ0qvk,4229
 nucliadb/export_import/models.py,sha256=dbjScNkiMRv4X3Ktudy1JRliD25bfoDTy3JmEZgQSCc,2121
-nucliadb/export_import/tasks.py,sha256=yPNdBdvTD7eGc7zvV9Rp7UZ0-mDhA34OOsLqHvns_v0,2975
+nucliadb/export_import/tasks.py,sha256=4JX3bygyLCLSuGxMCStYyoclh_CL8rPxrVVWuGqvcmM,3146
 nucliadb/export_import/utils.py,sha256=iAQAjYuNx0dhM2b5-1A0NEs8tSRsznuT-izysUrTwS0,19986
 nucliadb/ingest/__init__.py,sha256=fsw3C38VP50km3R-nHL775LNGPpJ4JxqXJ2Ib1f5SqE,1011
 nucliadb/ingest/app.py,sha256=rX1KE5vsAzG9hlArBk8WE2SOlvdYylcb-jNkMQNPJdQ,7407
-nucliadb/ingest/cache.py,sha256=w7jMMzamOmQ7gwXna6Dqm6isRNBVv6l5BTBlTxaYWjE,1005
 nucliadb/ingest/partitions.py,sha256=2NIhMYbNT0TNBL6bX1UMSi7vxFGICstCKEqsB0TXHOE,2410
 nucliadb/ingest/processing.py,sha256=8OggvuxNzktTTKDTUwsIuazhDParEWhn46CBZaMYAy8,20659
 nucliadb/ingest/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -258,24 +256,24 @@ nucliadb/standalone/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20
 nucliadb/standalone/api_router.py,sha256=hgq9FXpihzgjHkwcVGfGCSwyXy67fqXTfLFHuINzIi0,5567
 nucliadb/standalone/app.py,sha256=mAApNK_iVsQgJyd-mtwCeZq5csSimwnXmlQGH9a70pE,5586
 nucliadb/standalone/auth.py,sha256=UwMv-TywhMZabvVg3anQLeCRdoHDnWf2o3luvnoNBjs,7670
-nucliadb/standalone/config.py,sha256=g9JBJQfyw87TYZ3yuy0O9WFVLd_MmCJxSRSI0E8FwZE,5396
+nucliadb/standalone/config.py,sha256=hJ3p4dBRSsj5FOmIgAiEX9ZsAGUYd1W-_UJIol5LCCg,4967
 nucliadb/standalone/lifecycle.py,sha256=rdKLG-oOLN4rfd2VGG_2vlDUWYneWSCiuEhoeiFKfnM,2343
 nucliadb/standalone/migrations.py,sha256=s9-3RSZ-O3bjEw2TnBe_YWLUEKbub0bARUxi1gA3yuY,1950
 nucliadb/standalone/purge.py,sha256=ZY-cebb214FFiPG7OFmXZGg0G3CK5Amw0FLLm9WJhKE,1343
 nucliadb/standalone/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nucliadb/standalone/run.py,sha256=d4lmi9ePObbHeDqwcoZnp5JIkBp9iIPSw9uOwxeQMOU,5623
-nucliadb/standalone/settings.py,sha256=nPJ8R8yNUQD4G92zXBi5KiD6QWhXZdtLoQQUTXfuaHE,6041
+nucliadb/standalone/run.py,sha256=0QKEAT6pCaLvnuxTG3RltTlhE2g5-HI21KbOWfusBGE,5425
+nucliadb/standalone/settings.py,sha256=fbgqVT37XB2cJHJARnR19MO_dz6NLbkuIC2okH7J80o,5714
 nucliadb/standalone/versions.py,sha256=8CxNMNt2NgWM8ct50UsR4d44-ae7wtQI-sV-yGiFqyI,3508
 nucliadb/standalone/static/favicon.ico,sha256=96pKGp6Sx457JkTfjy1dtApMhkitixfU6invCUGAYOU,2285
 nucliadb/standalone/static/index.html,sha256=PEZfuEQFYnYACAL1ceN8xC0im8lBrUx838RkE8tbvgA,3833
 nucliadb/standalone/static/logo.svg,sha256=-wQqSvPGTdlKjUP6pHE6kiq005pgYjDzp9nPl0X71Mk,2639
 nucliadb/tasks/__init__.py,sha256=oFJ3A8HD7w11mBu-IixYE_KxA7juMGlYQb7YD_y6WPM,975
-nucliadb/tasks/consumer.py,sha256=x-999Nsw6lBcKvyGyCGPiGP_naANVYMfl9M-u0U3mhY,7052
+nucliadb/tasks/consumer.py,sha256=xc0Ql3N1Iq52dJ3t4YYGJFj1NCQAly0J5W_brfLa_F8,6894
 nucliadb/tasks/logger.py,sha256=C7keOEO_mjLVp5VbqAZ2QXfqVB2Hot7NgBlUP_SDSMw,924
 nucliadb/tasks/models.py,sha256=qrZKi5DNDQ07waMsp5L4_Fi7WRs57YiO-kmXlrBzEAA,1168
-nucliadb/tasks/producer.py,sha256=w4R1YXgXtmCPGcoNNOr3qkqJYcHJtSmix-xjt7vsPqk,3261
+nucliadb/tasks/producer.py,sha256=JRGnATkALyr_iLHq0OAjzVbfxZ_SOUa6sx-smU5p6SQ,3136
 nucliadb/tasks/retries.py,sha256=Zv-3Hys-SKayG9VQ7_7EIflkegE5j-xPGrf-nwaxsfY,5075
-nucliadb/tasks/utils.py,sha256=6tQVckqyzxv8PhVAd3ZqcMYpGcn73ZY6p1cpm1FxagA,1214
+nucliadb/tasks/utils.py,sha256=tV1AbWdFc3qfIULX44Veqj41FCD1B6XYjG6brULBeiw,1459
 nucliadb/tests/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
 nucliadb/tests/config.py,sha256=JN_Jhgj-fwM9_8IeO9pwxr6C1PiwRDrXxm67Y38rU30,2080
 nucliadb/tests/vectors.py,sha256=CcNKx-E8LPpyvRyljbmb-Tn_wST9Juw2CBoogWrKiTk,62843
@@ -349,8 +347,8 @@ nucliadb/writer/tus/local.py,sha256=7jYa_w9b-N90jWgN2sQKkNcomqn6JMVBOVeDOVYJHto,
 nucliadb/writer/tus/s3.py,sha256=vF0NkFTXiXhXq3bCVXXVV-ED38ECVoUeeYViP8uMqcU,8357
 nucliadb/writer/tus/storage.py,sha256=ToqwjoYnjI4oIcwzkhha_MPxi-k4Jk3Lt55zRwaC1SM,2903
 nucliadb/writer/tus/utils.py,sha256=MSdVbRsRSZVdkaum69_0wku7X3p5wlZf4nr6E0GMKbw,2556
-nucliadb-6.3.1.post3546.dist-info/METADATA,sha256=DNgc6YIrIj8zGIewzwiHRTsrQL_kYcQVR8VddQdzKLw,4291
-nucliadb-6.3.1.post3546.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
-nucliadb-6.3.1.post3546.dist-info/entry_points.txt,sha256=XqGfgFDuY3zXQc8ewXM2TRVjTModIq851zOsgrmaXx4,1268
-nucliadb-6.3.1.post3546.dist-info/top_level.txt,sha256=hwYhTVnX7jkQ9gJCkVrbqEG1M4lT2F_iPQND1fCzF80,20
-nucliadb-6.3.1.post3546.dist-info/RECORD,,
+nucliadb-6.3.1.post3557.dist-info/METADATA,sha256=z3g4U4gIG0vYmw55DjZgSN2PJJUziqy_qvdWFaAxDbA,4291
+nucliadb-6.3.1.post3557.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+nucliadb-6.3.1.post3557.dist-info/entry_points.txt,sha256=XqGfgFDuY3zXQc8ewXM2TRVjTModIq851zOsgrmaXx4,1268
+nucliadb-6.3.1.post3557.dist-info/top_level.txt,sha256=hwYhTVnX7jkQ9gJCkVrbqEG1M4lT2F_iPQND1fCzF80,20
+nucliadb-6.3.1.post3557.dist-info/RECORD,,
nucliadb/common/cluster/index_node.py DELETED
@@ -1,77 +0,0 @@
-# Copyright (C) 2021 Bosutech XXI S.L.
-#
-# nucliadb is offered under the AGPL v3.0 and as commercial software.
-# For commercial licensing, contact us at info@nuclia.com.
-#
-# AGPL:
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-from typing import Optional
-
-from lru import LRU
-
-from nucliadb.common.cluster.base import AbstractIndexNode
-from nucliadb.common.cluster.grpc_node_dummy import DummyReaderStub, DummyWriterStub
-from nucliadb.ingest import SERVICE_NAME
-from nucliadb_protos.nodereader_pb2_grpc import NodeReaderStub
-from nucliadb_protos.nodewriter_pb2_grpc import NodeWriterStub
-from nucliadb_utils.grpc import get_traced_grpc_channel
-
-from .settings import settings
-
-READ_CONNECTIONS = LRU(50)  # type: ignore
-WRITE_CONNECTIONS = LRU(50)  # type: ignore
-
-
-class IndexNode(AbstractIndexNode):
-    _writer: Optional[NodeWriterStub] = None
-    _reader: Optional[NodeReaderStub] = None
-
-    def _get_service_address(self, port_map: dict[str, int], port: Optional[int]) -> str:
-        hostname = self.address.split(":")[0]
-        if port is None:
-            # For testing purposes we need to be able to have a writing port
-            port = port_map[hostname]
-            grpc_address = f"localhost:{port}"
-        else:
-            grpc_address = f"{hostname}:{port}"
-        return grpc_address
-
-    @property
-    def writer(self) -> NodeWriterStub:
-        if self._writer is None or self.address not in WRITE_CONNECTIONS:
-            if not self.dummy:
-                grpc_address = self._get_service_address(
-                    settings.writer_port_map, settings.node_writer_port
-                )
-                channel = get_traced_grpc_channel(grpc_address, SERVICE_NAME, variant="_writer")
-                WRITE_CONNECTIONS[self.address] = NodeWriterStub(channel)
-            else:
-                WRITE_CONNECTIONS[self.address] = DummyWriterStub()
-            self._writer = WRITE_CONNECTIONS[self.address]
-        return self._writer  # type: ignore
-
-    @property
-    def reader(self) -> NodeReaderStub:
-        if self._reader is None or self.address not in READ_CONNECTIONS:
-            if not self.dummy:
-                grpc_address = self._get_service_address(
-                    settings.reader_port_map, settings.node_reader_port
-                )
-                channel = get_traced_grpc_channel(grpc_address, SERVICE_NAME, variant="_reader")
-                READ_CONNECTIONS[self.address] = NodeReaderStub(channel)
-            else:
-                READ_CONNECTIONS[self.address] = DummyReaderStub()
-            self._reader = READ_CONNECTIONS[self.address]
-        return self._reader  # type: ignore
nucliadb/ingest/cache.py DELETED
@@ -1,25 +0,0 @@
-# Copyright (C) 2021 Bosutech XXI S.L.
-#
-# nucliadb is offered under the AGPL v3.0 and as commercial software.
-# For commercial licensing, contact us at info@nuclia.com.
-#
-# AGPL:
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-from nucliadb.common.cluster.index_node import READ_CONNECTIONS, WRITE_CONNECTIONS
-
-
-def clear_ingest_cache():
-    READ_CONNECTIONS.clear()
-    WRITE_CONNECTIONS.clear()