nucliadb 6.9.1.post5180__py3-none-any.whl → 6.9.2.post5282__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nucliadb might be problematic.
- migrations/pg/0010_shards_index.py +34 -0
- nucliadb/common/cluster/manager.py +3 -19
- nucliadb/common/cluster/rebalance.py +484 -110
- nucliadb/common/cluster/rollover.py +29 -0
- nucliadb/common/cluster/utils.py +26 -0
- nucliadb/common/datamanagers/atomic.py +6 -0
- nucliadb/common/filter_expression.py +15 -32
- nucliadb/ingest/consumer/service.py +1 -2
- nucliadb/ingest/consumer/shard_creator.py +16 -5
- nucliadb/ingest/fields/base.py +0 -17
- nucliadb/ingest/orm/knowledgebox.py +78 -29
- nucliadb/ingest/orm/processor/processor.py +21 -16
- nucliadb/ingest/service/writer.py +12 -5
- nucliadb/migrator/datamanager.py +1 -7
- nucliadb/purge/__init__.py +2 -7
- nucliadb/reader/api/v1/learning_config.py +21 -0
- nucliadb/search/api/v1/find.py +1 -4
- nucliadb/search/api/v1/resource/ask.py +21 -1
- nucliadb/search/api/v1/search.py +1 -4
- nucliadb/search/search/chat/ask.py +0 -1
- nucliadb/search/search/chat/prompt.py +45 -13
- nucliadb/search/search/chat/query.py +0 -1
- nucliadb/search/search/find.py +1 -6
- nucliadb/search/search/query.py +0 -23
- nucliadb/search/search/query_parser/models.py +0 -1
- nucliadb/search/search/query_parser/parsers/catalog.py +2 -2
- nucliadb/search/search/query_parser/parsers/find.py +0 -8
- nucliadb/search/search/query_parser/parsers/search.py +0 -8
- nucliadb/search/search/query_parser/parsers/unit_retrieval.py +4 -11
- nucliadb/writer/api/v1/knowledgebox.py +15 -22
- {nucliadb-6.9.1.post5180.dist-info → nucliadb-6.9.2.post5282.dist-info}/METADATA +8 -9
- {nucliadb-6.9.1.post5180.dist-info → nucliadb-6.9.2.post5282.dist-info}/RECORD +35 -34
- {nucliadb-6.9.1.post5180.dist-info → nucliadb-6.9.2.post5282.dist-info}/WHEEL +0 -0
- {nucliadb-6.9.1.post5180.dist-info → nucliadb-6.9.2.post5282.dist-info}/entry_points.txt +0 -0
- {nucliadb-6.9.1.post5180.dist-info → nucliadb-6.9.2.post5282.dist-info}/top_level.txt +0 -0
nucliadb/common/cluster/rebalance.py
@@ -18,162 +18,532 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
 import asyncio
+import dataclasses
 import logging
+import math
+import random
+from typing import Optional, cast

+from grpc import StatusCode
+from grpc.aio import AioRpcError
 from nidx_protos import nodereader_pb2, noderesources_pb2

 from nucliadb.common import datamanagers, locking
 from nucliadb.common.cluster.utils import get_shard_manager
 from nucliadb.common.context import ApplicationContext
+from nucliadb.common.maindb.driver import Driver
+from nucliadb.common.maindb.pg import PGDriver
 from nucliadb.common.nidx import get_nidx_api_client, get_nidx_searcher_client
+from nucliadb_protos import writer_pb2
 from nucliadb_telemetry import errors
 from nucliadb_telemetry.logs import setup_logging
 from nucliadb_telemetry.utils import setup_telemetry
+from nucliadb_utils import const
 from nucliadb_utils.fastapi.run import serve_metrics
+from nucliadb_utils.utilities import has_feature

 from .settings import settings
-from .utils import delete_resource_from_shard, index_resource_to_shard
+from .utils import delete_resource_from_shard, index_resource_to_shard, wait_for_nidx

 logger = logging.getLogger(__name__)

 REBALANCE_LOCK = "rebalance"

-[… 17 removed lines (old 41-57) not shown …]
+MAX_MOVES_PER_SHARD = 100
+
+
+@dataclasses.dataclass
+class RebalanceShard:
+    id: str
+    nidx_id: str
+    paragraphs: int
+    active: bool
+
+    def to_dict(self):
+        return self.__dict__
+
+
+class Rebalancer:
+    def __init__(self, context: ApplicationContext, kbid: str):
+        self.context = context
+        self.kbid = kbid
+        self.kb_shards: Optional[writer_pb2.Shards] = None
+        self.index: dict[str, int] = {}
+
+    async def get_rebalance_shards(self) -> list[RebalanceShard]:
+        """
+        Return the sorted list of shards by increasing paragraph count.
+        """
+        self.kb_shards = await datamanagers.atomic.cluster.get_kb_shards(kbid=self.kbid)
+        if self.kb_shards is None:  # pragma: no cover
+            return []
+        return list(
+            sorted(
+                [
+                    RebalanceShard(
+                        id=shard.shard,
+                        nidx_id=shard.nidx_shard_id,
+                        paragraphs=await get_shard_paragraph_count(shard.nidx_shard_id),
+                        active=(idx == self.kb_shards.actual),
+                    )
+                    for idx, shard in enumerate(self.kb_shards.shards)
+                ],
+                key=lambda x: x.paragraphs,
+            )
         )
-        results[shard_meta.shard] = shard_data.paragraphs

-[… 1 removed line (old 61) not shown …]
+    async def build_shard_resources_index(self):
+        self.index = await build_shard_resources_index(self.context.kv_driver, self.kbid)

+    async def move_paragraphs(
+        self, from_shard: RebalanceShard, to_shard: RebalanceShard, max_paragraphs: int
+    ) -> int:
+        """
+        Takes random resources from the source shard and tries to move at most max_paragraphs.
+        It stops moving paragraphs until the are no more resources to move.
+        """
+        moved_paragraphs = 0

-[… 1 removed line (old 64) not shown …]
-    async with locking.distributed_lock(locking.NEW_SHARD_LOCK.format(kbid=kbid)):
-        async with datamanagers.with_ro_transaction() as txn:
-            kb_shards = await datamanagers.cluster.get_kb_shards(txn, kbid=kbid)
-            if kb_shards is None:
-                return
+        resources_batch: list[str] = []

-[… 2 removed lines (old 71-72) not shown …]
+        while moved_paragraphs < max_paragraphs:
+            if len(resources_batch) == 0:
+                resources_batch = await get_resources_from_shard(
+                    self.context.kv_driver, self.kbid, from_shard.id, n=50
+                )
+                if len(resources_batch) == 0:
+                    # No more resources to move or shard not found
+                    break
+
+            # Take a random resource to move
+            resource_id = random.choice(resources_batch)
+
+            assert self.kb_shards is not None
+            from_shard_obj = next(s for s in self.kb_shards.shards if s.shard == from_shard.id)
+            to_shard_obj = next(s for s in self.kb_shards.shards if s.shard == to_shard.id)
+            paragraphs_count = await get_resource_paragraphs_count(resource_id, from_shard.nidx_id)
+            moved = await move_resource_to_shard(
+                self.context, self.kbid, resource_id, from_shard_obj, to_shard_obj
+            )
+            if moved:
+                resources_batch.remove(resource_id)
+                self.index[from_shard.id] = self.index.get(from_shard.id, 1) - 1
+                self.index[to_shard.id] = self.index.get(to_shard.id, 0) + 1
+                moved_paragraphs += paragraphs_count

-[… 5 removed lines (old 74-78) not shown …]
+        return moved_paragraphs
+
+    async def wait_for_indexing(self):
+        try:
+            self.context.nats_manager
+        except AssertionError:  # pragma: no cover
+            logger.warning(f"Nats manager not initialized. Cannot wait for indexing")
+            return
+        while True:
+            try:
+                await wait_for_nidx(self.context.nats_manager, max_wait_seconds=60, max_pending=1000)
+                return
+            except asyncio.TimeoutError:
+                logger.warning("Nidx is behind. Backing off rebalancing.", extra={"kbid": self.kbid})
+                await asyncio.sleep(30)
+
+    async def rebalance_shards(self):
+        """
+        Iterate over shards until none of them need more rebalancing.
+
+        Will move excess of paragraphs to other shards (potentially creating new ones), and
+        merge small shards together when possible (potentially deleting empty ones.)
+
+
+        Merge chooses a <90% filled shard and fills it to almost 100%
+        Split chooses a >110% filled shard and reduces it to 100%
+        If the shard is between 90% and 110% full, nobody touches it
+        """
+        await self.build_shard_resources_index()
+        while True:
+            await self.wait_for_indexing()
+
+            shards = await self.get_rebalance_shards()
+
+            # Any shards to split?
+            shard_to_split = next((s for s in shards[::-1] if needs_split(s)), None)
+            if shard_to_split is not None:
+                await self.split_shard(shard_to_split, shards)
+                continue
+
+            # Any shards to merge?
+            shard_to_merge = next((s for s in shards if needs_merge(s, shards)), None)
+            if shard_to_merge is not None:
+                await self.merge_shard(shard_to_merge, shards)
+            else:
+                break
+
+    async def split_shard(self, shard_to_split: RebalanceShard, shards: list[RebalanceShard]):
+        logger.info(
+            "Splitting excess of paragraphs to other shards",
+            extra={
+                "kbid": self.kbid,
+                "shard": shard_to_split.to_dict(),
+            },
+        )
+
+        # First off, calculate if the excess fits in the other shards or we need to add a new shard.
+        # Note that we don't filter out the active shard on purpose.
+        excess = shard_to_split.paragraphs - settings.max_shard_paragraphs
+        other_shards = [s for s in shards if s.id != shard_to_split.id]
+        other_shards_capacity = sum(
+            [max(0, (settings.max_shard_paragraphs - s.paragraphs)) for s in other_shards]
+        )
+        if excess > other_shards_capacity:
+            shards_to_add = math.ceil((excess - other_shards_capacity) / settings.max_shard_paragraphs)
+            logger.info(
+                "More shards needed",
+                extra={
+                    "kbid": self.kbid,
+                    "shards_to_add": shards_to_add,
+                    "all_shards": [s.to_dict() for s in shards],
+                },
+            )
+            # Add new shards where to rebalance the excess of paragraphs
+            async with (
+                locking.distributed_lock(locking.NEW_SHARD_LOCK.format(kbid=self.kbid)),
+                datamanagers.with_rw_transaction() as txn,
+            ):
+                kb_config = await datamanagers.kb.get_config(txn, kbid=self.kbid)
+                prewarm = kb_config is not None and kb_config.prewarm_enabled
                 sm = get_shard_manager()
-[… 1 removed line (old 80) not shown …]
+                for _ in range(shards_to_add):
+                    await sm.create_shard_by_kbid(txn, self.kbid, prewarm_enabled=prewarm)
                 await txn.commit()

+        # Recalculate after having created shards, the active shard is a different one
+        shards = await self.get_rebalance_shards()
+
+        # Now, move resources to other shards as long as we are still over the max
+        for _ in range(MAX_MOVES_PER_SHARD):
+            shard_paragraphs = next(s.paragraphs for s in shards if s.id == shard_to_split.id)
+            excess = shard_paragraphs - settings.max_shard_paragraphs
+            if excess <= 0:
+                logger.info(
+                    "Shard rebalanced successfuly",
+                    extra={"kbid": self.kbid, "shard": shard_to_split.to_dict()},
+                )
+                break

-[… 4 removed lines (old 84-87) not shown …]
-    to_shard_id: str,
-    count: int = 20,
-) -> None:
-    async with datamanagers.with_ro_transaction() as txn:
-        kb_shards = await datamanagers.cluster.get_kb_shards(txn, kbid=kbid)
-        if kb_shards is None:  # pragma: no cover
-            logger.warning("No shards found for kb. This should not happen.", extra={"kbid": kbid})
-            return
+            target_shard, target_capacity = get_target_shard(shards, shard_to_split, skip_active=False)
+            if target_shard is None:
+                logger.warning("No target shard found for splitting", extra={"kbid": self.kbid})
+                break

-[… 4 removed lines (old 97-100) not shown …]
+            moved_paragraphs = await self.move_paragraphs(
+                from_shard=shard_to_split,
+                to_shard=target_shard,
+                max_paragraphs=min(excess, target_capacity),
+            )

-[… 2 removed lines (old 102-103) not shown …]
+            # Update shard paragraph counts
+            shard_to_split.paragraphs -= moved_paragraphs
+            target_shard.paragraphs += moved_paragraphs
+            shards.sort(key=lambda x: x.paragraphs)

-[… 1 removed line (old 105) not shown …]
-        shard=from_shard.nidx_shard_id,
-        paragraph=False,
-        document=True,
-        result_per_page=count,
-    )
-    request.field_filter.field.field_type = "a"
-    request.field_filter.field.field_id = "title"
-    search_response: nodereader_pb2.SearchResponse = await get_nidx_searcher_client().Search(request)
+            await self.wait_for_indexing()

-[… 11 removed lines (old 115-125) not shown …]
+    async def merge_shard(self, shard_to_merge: RebalanceShard, shards: list[RebalanceShard]):
+        logger.info(
+            "Merging shard",
+            extra={
+                "kbid": self.kbid,
+                "shard": shard_to_merge.to_dict(),
+            },
+        )
+        empty_shard = False
+
+        for _ in range(MAX_MOVES_PER_SHARD):
+            resources_count = self.index.get(shard_to_merge.id, 0)
+            if resources_count == 0:
+                logger.info(
+                    "Shard is now empty",
+                    extra={
+                        "kbid": self.kbid,
+                        "shard": shard_to_merge.to_dict(),
+                    },
                 )
-[… 6 removed lines (old 127-132) not shown …]
+                empty_shard = True
+                break
+
+            logger.info(
+                "Shard not yet empty",
+                extra={
+                    "kbid": self.kbid,
+                    "shard": shard_to_merge.to_dict(),
+                    "remaining": resources_count,
+                },
+            )

-[… 2 removed lines (old 134-135) not shown …]
+            target_shard, target_capacity = get_target_shard(shards, shard_to_merge, skip_active=True)
+            if target_shard is None:
+                logger.warning(
+                    "No target shard could be found for merging. Moving on",
+                    extra={"kbid": self.kbid, "shard": shard_to_merge.to_dict()},
                 )
-[… 3 removed lines (old 137-139) not shown …]
+                break
+
+            moved_paragraphs = await self.move_paragraphs(
+                from_shard=shard_to_merge,
+                to_shard=target_shard,
+                max_paragraphs=target_capacity,
+            )
+
+            # Update shard paragraph counts
+            shard_to_merge.paragraphs -= moved_paragraphs
+            target_shard.paragraphs += moved_paragraphs
+            shards.sort(key=lambda x: x.paragraphs)
+
+            await self.wait_for_indexing()
+
+        if empty_shard:
+            # Build the index again, and make sure there is no resource assigned to this shard
+            await self.build_shard_resources_index()
+            shard_resources = self.index.get(shard_to_merge.id, 0)
+            if shard_resources > 0:
+                logger.error(
+                    f"Shard expected to be empty, but it isn't. Won't be deleted.",
+                    extra={
+                        "kbid": self.kbid,
+                        "shard": shard_to_merge.id,
+                        "resources": shard_resources,
+                    },
+                )
+                return
+
+            # If shard was emptied, delete it
+            async with locking.distributed_lock(locking.NEW_SHARD_LOCK.format(kbid=self.kbid)):
+                async with datamanagers.with_rw_transaction() as txn:
+                    kb_shards = await datamanagers.cluster.get_kb_shards(
+                        txn, kbid=self.kbid, for_update=True
+                    )
+                    if kb_shards is not None:
+                        logger.info(
+                            "Deleting empty shard",
+                            extra={
+                                "kbid": self.kbid,
+                                "shard_id": shard_to_merge.id,
+                                "nidx_shard_id": shard_to_merge.nidx_id,
+                            },
+                        )
+
+                        # Delete shards from kb shards in maindb
+                        to_delete, to_delete_idx = next(
+                            (s, idx)
+                            for idx, s in enumerate(kb_shards.shards)
+                            if s.shard == shard_to_merge.id
+                        )
+                        kb_shards.shards.remove(to_delete)
+                        if to_delete_idx <= kb_shards.actual:
+                            # Only decrement the actual pointer if we remove before the pointer.
+                            kb_shards.actual -= 1
+                            assert kb_shards.actual >= 0
+                        await datamanagers.cluster.update_kb_shards(
+                            txn, kbid=self.kbid, shards=kb_shards
+                        )
+                        await txn.commit()
+
+                        # Delete shard from nidx
+                        await get_nidx_api_client().DeleteShard(
+                            noderesources_pb2.ShardId(id=to_delete.nidx_shard_id)
+                        )
+
+
+async def build_shard_resources_index(driver: Driver, kbid: str) -> dict[str, int]:
+    index: dict[str, int] = {}
+    driver = cast(PGDriver, driver)
+    async with driver._get_connection() as conn:
+        cur = conn.cursor("")
+        await cur.execute(
+            """
+            SELECT encode(value, 'escape'), COUNT(*) FROM resources WHERE key ~ '/kbs/[^/]*/r/[^/]*/shard$' AND key ~ %s GROUP BY value;
+            """,
+            (f"/kbs/{kbid}/r/[^/]*/shard$",),
+        )
+        records = await cur.fetchall()
+    shard: str
+    resources_count: int
+    for shard, resources_count in records:
+        index[shard] = resources_count
+    return index
+
+
+async def get_resources_from_shard(driver: Driver, kbid: str, shard_id: str, n: int) -> list[str]:
+    driver = cast(PGDriver, driver)
+    async with driver._get_connection() as conn:
+        cur = conn.cursor("")
+        await cur.execute(
+            """
+            SELECT split_part(key, '/', 5) FROM resources WHERE key ~ '/kbs/[^/]*/r/[^/]*/shard$' AND key ~ %s AND encode(value, 'escape') LIKE %s limit %s;
+            """,
+            (f"/kbs/{kbid}/r/[^/]*/shard$", shard_id, n),
+        )
+        records = await cur.fetchall()
+    rids: list[str] = [r[0] for r in records]
+    return rids
+
+
+async def get_resource_paragraphs_count(resource_id: str, nidx_shard_id: str) -> int:
+    # Do a search on the fields (paragraph) index and return the number of paragraphs this resource has
+    try:
+        request = nodereader_pb2.SearchRequest(
+            shard=nidx_shard_id,
+            paragraph=True,
+            document=False,
+            result_per_page=0,
+            field_filter=nodereader_pb2.FilterExpression(
+                resource=nodereader_pb2.FilterExpression.ResourceFilter(resource_id=resource_id)
+            ),
+        )
+        search_response: nodereader_pb2.SearchResponse = await get_nidx_searcher_client().Search(request)
+        return search_response.paragraph.total
+    except AioRpcError as exc:  # pragma: no cover
+        if exc.code() == StatusCode.NOT_FOUND:
+            logger.warning(f"Shard not found in nidx", extra={"nidx_shard_id": nidx_shard_id})
+            return 0
+        raise
+
+
+def get_target_shard(
+    shards: list[RebalanceShard], rebalanced_shard: RebalanceShard, skip_active: bool = True
+) -> tuple[Optional[RebalanceShard], int]:
+    """
+    Return the biggest shard with capacity (< 90% of the max paragraphs per shard).
+    """
+    target_shard = next(
+        reversed(
+            [
+                s
+                for s in shards
+                if s.id != rebalanced_shard.id
+                and s.paragraphs < settings.max_shard_paragraphs * 0.9
+                and (not skip_active or (skip_active and not s.active))
+            ]
+        ),
+        None,
+    )
+    if target_shard is None:  # pragma: no cover
+        return None, 0
+
+    # Aim to fill target shards up to 100% of max
+    capacity = int(max(0, settings.max_shard_paragraphs - target_shard.paragraphs))
+    return target_shard, capacity
+
+
+async def get_shard_paragraph_count(nidx_shard_id: str) -> int:
+    # Do a search on the fields (paragraph) index
+    try:
+        request = nodereader_pb2.SearchRequest(
+            shard=nidx_shard_id,
+            paragraph=True,
+            document=False,
+            result_per_page=0,
+        )
+        search_response: nodereader_pb2.SearchResponse = await get_nidx_searcher_client().Search(request)
+        return search_response.paragraph.total
+    except AioRpcError as exc:  # pragma: no cover
+        if exc.code() == StatusCode.NOT_FOUND:
+            logger.warning(f"Shard not found in nidx", extra={"nidx_shard_id": nidx_shard_id})
+            return 0
+        raise
+
+
+async def move_resource_to_shard(
+    context: ApplicationContext,
+    kbid: str,
+    resource_id: str,
+    from_shard: writer_pb2.ShardObject,
+    to_shard: writer_pb2.ShardObject,
+) -> bool:
+    indexed_to_new = False
+    deleted_from_old = False
+    try:
+        async with (
+            datamanagers.with_transaction() as txn,
+            locking.distributed_lock(
+                locking.RESOURCE_INDEX_LOCK.format(kbid=kbid, resource_id=resource_id)
+            ),
+        ):
+            found_shard_id = await datamanagers.resources.get_resource_shard_id(
+                txn, kbid=kbid, rid=resource_id, for_update=True
+            )
+            if found_shard_id is None:  # pragma: no cover
+                # resource deleted
+                return False
+            if found_shard_id != from_shard.shard:  # pragma: no cover
+                # resource could have already been moved
+                return False
+
+            await datamanagers.resources.set_resource_shard_id(
+                txn, kbid=kbid, rid=resource_id, shard=to_shard.shard
+            )
+            await index_resource_to_shard(context, kbid, resource_id, to_shard)
+            indexed_to_new = True
+            await delete_resource_from_shard(context, kbid, resource_id, from_shard)
+            deleted_from_old = True
+            await txn.commit()
+            return True
+    except Exception:
+        logger.exception(
+            "Failed to move resource",
+            extra={"kbid": kbid, "resource_id": resource_id},
+        )
+        # XXX Not ideal failure situation here. Try reverting the whole move even though it could be redundant
+        try:
+            if indexed_to_new:
+                await delete_resource_from_shard(context, kbid, resource_id, to_shard)
+            if deleted_from_old:
+                await index_resource_to_shard(context, kbid, resource_id, from_shard)
         except Exception:
             logger.exception(
-                "Failed to move resource",
+                "Failed to revert move resource. Hopefully you never see this message.",
                 extra={"kbid": kbid, "resource_id": resource_id},
             )
-[… 1 removed line (old 145) not shown …]
-        try:
-            await index_resource_to_shard(context, kbid, resource_id, from_shard)
-            await delete_resource_from_shard(context, kbid, resource_id, to_shard)
-        except Exception:
-            logger.exception(
-                "Failed to revert move resource. Hopefully you never see this message.",
-                extra={"kbid": kbid, "resource_id": resource_id},
-            )
+        return False


-[… 2 removed lines (old 156-157) not shown …]
+def needs_split(shard: RebalanceShard) -> bool:
+    """
+    Return true if the shard is more than 110% of the max.

-[… 5 removed lines (old 159-163) not shown …]
-    largest_shard = shard_paragraphs[-1][0]
-    assert smallest_shard != largest_shard
+    Active shards are not considered for splitting: the shard creator subscriber will
+    eventually create a new shard, make it the active one and the previous one, if
+    too full, will be split.
+    """
+    return not shard.active and (shard.paragraphs > (settings.max_shard_paragraphs * 1.1))

-    if smallest_shard in rebalanced_shards:
-        # XXX This is to prevent flapping data between shards on a single pass
-        # if we already rebalanced this shard, then we can't do anything else
-        break

-[… 1 removed line (old 172) not shown …]
+def needs_merge(shard: RebalanceShard, all_shards: list[RebalanceShard]) -> bool:
+    """
+    Returns true if a shard is less 75% full and there is enough capacity on the other shards to fit it.
+
+    Active shards are not considered for merging. Shards that are more than 75% full are also skipped.
+    """
+    if shard.active:
+        return False
+    if shard.paragraphs > (settings.max_shard_paragraphs * 0.75):
+        return False
+    other_shards = [s for s in all_shards if s.id != shard.id and not s.active]
+    other_shards_capacity = sum(
+        [max(0, (settings.max_shard_paragraphs - s.paragraphs)) for s in other_shards]
+    )
+    return shard.paragraphs < other_shards_capacity

-    rebalanced_shards.add(largest_shard)

-[… 1 removed line (old 176) not shown …]
+async def rebalance_kb(context: ApplicationContext, kbid: str) -> None:
+    rebalancer = Rebalancer(context, kbid)
+    try:
+        logger.info("Starting rebalance for kb", extra={"kbid": kbid})
+        await rebalancer.rebalance_shards()
+        logger.info("Finished rebalance for kb", extra={"kbid": kbid})
+    except Exception as err:
+        logger.exception("Rebalance finished with error", extra={"kbid": kbid})
+        errors.capture_exception(err)


 async def run(context: ApplicationContext) -> None:
@@ -182,8 +552,12 @@ async def run(context: ApplicationContext) -> None:
         # get all kb ids
         async with datamanagers.with_ro_transaction() as txn:
             kbids = [kbid async for kbid, _ in datamanagers.kb.get_kbs(txn)]
-        # go through each kb and see if shards need to be
+        # go through each kb and see if shards need to be rebalanced
         for kbid in kbids:
+            if not has_feature(
+                const.Features.REBALANCE_ENABLED, default=False, context={"kbid": kbid}
+            ):
+                continue
             async with locking.distributed_lock(locking.KB_SHARDS_LOCK.format(kbid=kbid)):
                 await rebalance_kb(context, kbid)
     except locking.ResourceLocked as exc:
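For readers skimming the rebalance changes, the policy introduced above comes down to three thresholds on settings.max_shard_paragraphs: a non-active shard over 110% of the limit is split, a non-active shard at or under 75% is merged away when the remaining shards can absorb it, and only shards below 90% of the limit are picked as targets. What follows is a minimal standalone sketch of that decision logic, not the packaged module itself; the Shard dataclass and the MAX_SHARD_PARAGRAPHS value are assumptions for illustration (nucliadb reads the real limit from its settings).

from dataclasses import dataclass

MAX_SHARD_PARAGRAPHS = 1_000_000  # hypothetical limit; the real value comes from nucliadb settings


@dataclass
class Shard:
    id: str
    paragraphs: int
    active: bool


def needs_split(shard: Shard) -> bool:
    # Only non-active shards over 110% of the limit are split
    return not shard.active and shard.paragraphs > MAX_SHARD_PARAGRAPHS * 1.1


def needs_merge(shard: Shard, all_shards: list[Shard]) -> bool:
    # Only non-active shards at or under 75% of the limit are merge candidates,
    # and only if the other non-active shards have spare capacity to absorb them
    if shard.active or shard.paragraphs > MAX_SHARD_PARAGRAPHS * 0.75:
        return False
    others = [s for s in all_shards if s.id != shard.id and not s.active]
    spare = sum(max(0, MAX_SHARD_PARAGRAPHS - s.paragraphs) for s in others)
    return shard.paragraphs < spare


if __name__ == "__main__":
    shards = [
        Shard("a", 1_200_000, active=False),  # over 110% -> split
        Shard("b", 300_000, active=False),    # under 75% with room elsewhere -> merge
        Shard("c", 950_000, active=True),     # active shard -> left alone
        Shard("d", 400_000, active=False),    # under 75% with room elsewhere -> merge
    ]
    for s in shards:
        action = "split" if needs_split(s) else "merge" if needs_merge(s, shards) else "keep"
        print(s.id, action)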