nucliadb 6.3.1.post3459-py3-none-any.whl → 6.3.1.post3472-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nucliadb/common/cluster/exceptions.py +0 -20
- nucliadb/common/cluster/manager.py +0 -71
- nucliadb/ingest/consumer/service.py +1 -16
- nucliadb/ingest/service/writer.py +2 -2
- nucliadb/metrics_exporter.py +6 -6
- nucliadb/purge/orphan_shards.py +9 -48
- nucliadb/search/app.py +1 -25
- nucliadb/search/search/chat/ask.py +18 -0
- nucliadb/standalone/api_router.py +0 -20
- nucliadb/tests/config.py +0 -4
- {nucliadb-6.3.1.post3459.dist-info → nucliadb-6.3.1.post3472.dist-info}/METADATA +6 -6
- {nucliadb-6.3.1.post3459.dist-info → nucliadb-6.3.1.post3472.dist-info}/RECORD +15 -15
- {nucliadb-6.3.1.post3459.dist-info → nucliadb-6.3.1.post3472.dist-info}/WHEEL +0 -0
- {nucliadb-6.3.1.post3459.dist-info → nucliadb-6.3.1.post3472.dist-info}/entry_points.txt +0 -0
- {nucliadb-6.3.1.post3459.dist-info → nucliadb-6.3.1.post3472.dist-info}/top_level.txt +0 -0
nucliadb/common/cluster/exceptions.py CHANGED
@@ -27,10 +27,6 @@ class NotFound(Exception):
     pass
 
 
-class NodeClusterSmall(Exception):
-    pass
-
-
 class ShardNotFound(NotFound):
     pass
 
@@ -39,18 +35,10 @@ class ShardsNotFound(NotFound):
     pass
 
 
-class NodesUnsync(Exception):
-    pass
-
-
 class NodeError(Exception):
     pass
 
 
-class ExhaustedNodesError(Exception):
-    pass
-
-
 class ReallyStopPulling(Exception):
     pass
 
@@ -62,11 +50,3 @@ class SequenceOrderViolation(Exception):
 
 class EntitiesGroupNotFound(NotFound):
     pass
-
-
-class NoHealthyNodeAvailable(Exception):
-    pass
-
-
-class NodeConnectionError(Exception):
-    pass
nucliadb/common/cluster/manager.py CHANGED
@@ -22,14 +22,10 @@ import logging
 import uuid
 from typing import Any, Awaitable, Callable, Optional
 
-import backoff
-
 from nucliadb.common import datamanagers
 from nucliadb.common.cluster.base import AbstractIndexNode
 from nucliadb.common.cluster.exceptions import (
-    NodeClusterSmall,
     NodeError,
-    NodesUnsync,
     ShardNotFound,
     ShardsNotFound,
 )
@@ -49,17 +45,6 @@ from .settings import settings
 
 logger = logging.getLogger(__name__)
 
-INDEX_NODES: dict[str, AbstractIndexNode] = {}
-READ_REPLICA_INDEX_NODES: dict[str, set[str]] = {}
-
-
-def get_index_nodes(include_secondary: bool = False) -> list[AbstractIndexNode]:
-    return [get_nidx_fake_node()]
-
-
-def get_index_node(node_id: str) -> Optional[AbstractIndexNode]:
-    return get_nidx_fake_node()
-
 
 class KBShardManager:
     # TODO: move to data manager
@@ -301,7 +286,6 @@ class StandaloneKBShardManager(KBShardManager):
         self._lock = asyncio.Lock()
         self._change_count: dict[tuple[str, str], int] = {}
 
-    @backoff.on_exception(backoff.expo, NodesUnsync, jitter=backoff.random_jitter, max_tries=5)
     async def delete_resource(
         self,
         shard: writer_pb2.ShardObject,
@@ -321,7 +305,6 @@ class StandaloneKBShardManager(KBShardManager):
         indexpb.typemessage = nodewriter_pb2.TypeMessage.DELETION
         await nidx.index(indexpb)
 
-    @backoff.on_exception(backoff.expo, NodesUnsync, jitter=backoff.random_jitter, max_tries=5)
    async def add_resource(
         self,
         shard: writer_pb2.ShardObject,
@@ -367,57 +350,3 @@ def choose_node(
 ) -> tuple[AbstractIndexNode, str]:
     fake_node = get_nidx_fake_node()
     return fake_node, shard.nidx_shard_id
-
-
-def check_enough_nodes():
-    return True
-    """
-    It raises an exception if it can't find enough nodes for the configured replicas.
-    """
-    drain_nodes = settings.drain_nodes
-    target_replicas = settings.node_replicas
-    available_nodes = get_index_nodes()
-    available_nodes = [node for node in available_nodes if node.id not in drain_nodes]
-    if len(available_nodes) < target_replicas:
-        raise NodeClusterSmall(
-            f"Not enough nodes. Total: {len(available_nodes)}, Required: {target_replicas}"
-        )
-    if settings.max_node_replicas >= 0:
-        available_nodes = list(
-            filter(lambda n: n.shard_count < settings.max_node_replicas, available_nodes)
-        )
-        if len(available_nodes) < target_replicas:
-            raise NodeClusterSmall(
-                f"Could not find enough nodes with available shards. Available: {len(available_nodes)}, Required: {target_replicas}"  # noqa
-            )
-
-
-def sorted_primary_nodes(
-    avoid_nodes: Optional[list[str]] = None,
-    ignore_nodes: Optional[list[str]] = None,
-) -> list[str]:
-    """
-    Returns the list of all primary node ids sorted by decreasing available
-    disk space (from more to less available disk reported).
-
-    Nodes in `avoid_nodes` are placed at the tail of the list.
-    Nodes in `ignore_nodes` are ignored and never returned.
-    """
-    primary_nodes = get_index_nodes(include_secondary=False)
-
-    # Sort by available disk
-    sorted_nodes = sorted(primary_nodes, key=lambda n: n.available_disk, reverse=True)
-    available_node_ids = [node.id for node in sorted_nodes]
-
-    avoid_nodes = avoid_nodes or []
-    ignore_nodes = ignore_nodes or []
-
-    # Get the non-avoided nodes first
-    preferred_nodes = [nid for nid in available_node_ids if nid not in avoid_nodes]
-
-    # Add avoid_nodes to the end of the last nodes
-    result_nodes = preferred_nodes + [nid for nid in available_node_ids if nid not in preferred_nodes]
-
-    # Remove ignore_nodes from the list
-    result_nodes = [nid for nid in result_nodes if nid not in ignore_nodes]
-    return result_nodes
nucliadb/ingest/consumer/service.py CHANGED
@@ -22,14 +22,13 @@ import sys
 from functools import partial
 from typing import Awaitable, Callable, Optional
 
-from nucliadb.common.cluster import manager
 from nucliadb.common.maindb.utils import setup_driver
 from nucliadb.ingest import SERVICE_NAME, logger
 from nucliadb.ingest.consumer.consumer import IngestConsumer, IngestProcessedConsumer
 from nucliadb.ingest.consumer.pull import PullWorker
 from nucliadb.ingest.settings import settings
 from nucliadb_utils.exceptions import ConfigurationError
-from nucliadb_utils.settings import running_settings, transaction_settings
+from nucliadb_utils.settings import transaction_settings
 from nucliadb_utils.utilities import (
     get_audit,
     get_nats_manager,
@@ -85,13 +84,6 @@ async def start_ingest_consumers(
     if transaction_settings.transaction_local:
         raise ConfigurationError("Can not start ingest consumers in local mode")
 
-    while len(manager.get_index_nodes()) == 0 and running_settings.running_environment not in (
-        "local",
-        "test",
-    ):
-        logger.warning("Initializion delayed 1s to receive some Nodes on the cluster")
-        await asyncio.sleep(1)
-
     driver = await setup_driver()
     pubsub = await get_pubsub()
     storage = await get_storage(service_name=service_name or SERVICE_NAME)
@@ -135,13 +127,6 @@ async def start_ingest_processed_consumer(
     if transaction_settings.transaction_local:
         raise ConfigurationError("Can not start ingest consumers in local mode")
 
-    while len(manager.get_index_nodes()) == 0 and running_settings.running_environment not in (
-        "local",
-        "test",
-    ):
-        logger.warning("Initializion delayed 1s to receive some Nodes on the cluster")
-        await asyncio.sleep(1)
-
     driver = await setup_driver()
     pubsub = await get_pubsub()
     storage = await get_storage(service_name=service_name or SERVICE_NAME)
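Both consumer entry points drop the same guard: a one-second polling loop that held back startup until at least one index node had registered (except in the `local`/`test` environments). Since nidx needs no membership handshake, consumers now proceed straight to driver, pubsub, and storage setup. The deleted pattern, generalized into a minimal sketch:

```python
import asyncio
import logging
from typing import Callable, Sequence


async def wait_for_cluster(get_members: Callable[[], Sequence[str]], skip: bool) -> None:
    # Generalized form of the removed guard: poll once per second until the
    # member list is non-empty, unless the environment opts out of waiting.
    while not skip and len(get_members()) == 0:
        logging.warning("Initialization delayed 1s to receive some Nodes on the cluster")
        await asyncio.sleep(1)
```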
nucliadb/ingest/service/writer.py CHANGED
@@ -22,7 +22,7 @@ from typing import AsyncIterator
 
 from nucliadb.common import datamanagers
 from nucliadb.common.cluster.exceptions import AlreadyExists, EntitiesGroupNotFound
-from nucliadb.common.cluster.manager import
+from nucliadb.common.cluster.manager import get_nidx_fake_node
 from nucliadb.common.cluster.utils import get_shard_manager
 from nucliadb.common.datamanagers.exceptions import KnowledgeBoxNotFound
 from nucliadb.common.external_index_providers.exceptions import ExternalIndexCreationError
@@ -418,7 +418,7 @@ class WriterServicer(writer_pb2_grpc.WriterServicer):
                    shard_count=n.shard_count,
                    primary_id=n.primary_id or "",
                )
-                for n in
+                for n in [get_nidx_fake_node()]
            ]
        )
        return response
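The member listing in `WriterServicer` keeps its response shape but now always describes exactly one synthetic node. A self-contained sketch of the new comprehension; `Member` and `FakeNode` are hypothetical stand-ins for the real protobuf message and for whatever `get_nidx_fake_node()` returns:

```python
from dataclasses import dataclass


@dataclass
class Member:  # hypothetical stand-in for the gRPC member message
    id: str
    shard_count: int
    primary_id: str


@dataclass
class FakeNode:  # hypothetical stand-in for the nidx fake node
    id: str = "nidx"
    shard_count: int = 0
    primary_id: str = ""


def get_nidx_fake_node() -> FakeNode:
    return FakeNode()


# The iterable is now a single-element list, as in the + line above.
members = [
    Member(id=n.id, shard_count=n.shard_count, primary_id=n.primary_id or "")
    for n in [get_nidx_fake_node()]
]
assert [m.id for m in members] == ["nidx"]
```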
nucliadb/metrics_exporter.py CHANGED
@@ -24,11 +24,12 @@ from typing import AsyncGenerator, Callable, Tuple, cast
 
 from nucliadb import logger
 from nucliadb.common import datamanagers
-from nucliadb.common.cluster import manager as cluster_manager
 from nucliadb.common.context import ApplicationContext
 from nucliadb.common.maindb.pg import PGDriver
 from nucliadb.common.maindb.utils import get_driver
+from nucliadb.common.nidx import get_nidx_api_client
 from nucliadb.migrator.datamanager import MigrationsDataManager
+from nucliadb_protos.noderesources_pb2 import EmptyQuery, NodeMetadata
 from nucliadb_telemetry import metrics
 from nucliadb_telemetry.logs import setup_logging
 from nucliadb_telemetry.utils import setup_telemetry
@@ -48,11 +49,10 @@ async def update_node_metrics(context: ApplicationContext):
     # Clear previoulsy set values so that we report only the current state
     SHARD_COUNT.gauge.clear()
 
-
-
-
-
-    SHARD_COUNT.set(node.shard_count, labels=dict(node=node.id))
+    nidx_api = get_nidx_api_client()
+    metadata: NodeMetadata = await nidx_api.GetMetadata(EmptyQuery())
+
+    SHARD_COUNT.set(metadata.shard_count, labels={"node": "nidx"})
 
 
 async def iter_kbids(context: ApplicationContext) -> AsyncGenerator[str, None]:
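Instead of iterating cluster members, the exporter now asks nidx directly for its metadata and publishes a single gauge sample under the constant label `node="nidx"`. Condensed from the added lines (the `context` parameter is dropped here for brevity, and `SHARD_COUNT` is the module-level gauge defined elsewhere in metrics_exporter.py):

```python
from nucliadb.common.nidx import get_nidx_api_client
from nucliadb_protos.noderesources_pb2 import EmptyQuery, NodeMetadata


async def update_node_metrics() -> None:
    # Clear previously set values so that we report only the current state
    SHARD_COUNT.gauge.clear()
    nidx_api = get_nidx_api_client()
    metadata: NodeMetadata = await nidx_api.GetMetadata(EmptyQuery())
    SHARD_COUNT.set(metadata.shard_count, labels={"node": "nidx"})
```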
nucliadb/purge/orphan_shards.py CHANGED
@@ -55,26 +55,15 @@ async def detect_orphan_shards(driver: Driver) -> dict[str, ShardLocation]:
     """
     # To avoid detecting a new shard as orphan, query the index first and maindb
     # afterwards
-    indexed_shards
-    available_nodes = manager.get_index_nodes()
-    for node in available_nodes:
-        node_shards = await _get_indexed_shards(node)
-        indexed_shards.update(node_shards)
-
+    indexed_shards = await _get_indexed_shards()
     stored_shards = await _get_stored_shards(driver)
 
     # Log an error in case we found a shard stored but not indexed, this should
     # never happen as shards are created in the index node and then stored in
     # maindb
     not_indexed_shards = stored_shards.keys() - indexed_shards.keys()
-    available_nodes_ids = [node.id for node in available_nodes]
     for shard_id in not_indexed_shards:
         location = stored_shards[shard_id]
-
-        # skip shards from unavailable nodes
-        if location.node_id not in available_nodes_ids:
-            continue
-
         logger.error(
             "Found a shard on maindb not indexed in the index nodes",
             extra={
@@ -86,41 +75,24 @@ async def detect_orphan_shards(driver: Driver) -> dict[str, ShardLocation]:
 
     orphan_shard_ids = indexed_shards.keys() - stored_shards.keys()
     orphan_shards: dict[str, ShardLocation] = {}
-
+    node = manager.get_nidx_fake_node()
     async with datamanagers.with_ro_transaction() as txn:
         for shard_id in orphan_shard_ids:
-
-            node = manager.get_index_node(node_id)  # type: ignore
-            if node is None:
-                unavailable_nodes.add(node_id)
-                kbid = UNKNOWN_KB
-            else:
-                kbid = await _get_kbid(node, shard_id) or UNKNOWN_KB
-
+            kbid = await _get_kbid(node, shard_id) or UNKNOWN_KB
             # Shards with knwon KB ids can be checked and ignore those comming from
             # an ongoing migration/rollover
             if kbid != UNKNOWN_KB:
                 skip = await datamanagers.rollover.is_rollover_shard(txn, kbid=kbid, shard_id=shard_id)
                 if skip:
                     continue
-
-            orphan_shards[shard_id] = ShardLocation(kbid=kbid, node_id=node_id)
-
-    if len(unavailable_nodes) > 0:
-        logger.info(
-            "Some nodes were unavailable while checking shard details and were skipped",
-            extra={"nodes": list(unavailable_nodes)},
-        )
-
+            orphan_shards[shard_id] = ShardLocation(kbid=kbid, node_id="nidx")
     return orphan_shards
 
 
-async def _get_indexed_shards(
-
-
-    for shard_id in
-        indexed_shards[shard_id] = ShardLocation(kbid=UNKNOWN_KB, node_id=node.id)
-    return indexed_shards
+async def _get_indexed_shards() -> dict[str, ShardLocation]:
+    nidx = manager.get_nidx_fake_node()
+    shards = await nidx.list_shards()
+    return {shard_id: ShardLocation(kbid=UNKNOWN_KB, node_id="nidx") for shard_id in shards}
 
 
 async def _get_stored_shards(driver: Driver) -> dict[str, ShardLocation]:
@@ -188,13 +160,8 @@ async def purge_orphan_shards(driver: Driver):
     orphan_shards = await detect_orphan_shards(driver)
     logger.info(f"Found {len(orphan_shards)} orphan shards. Purge starts...")
 
-
+    node = manager.get_nidx_fake_node()
     for shard_id, location in orphan_shards.items():
-        node = manager.get_index_node(location.node_id)
-        if node is None:
-            unavailable_nodes.add(location.node_id)
-            continue
-
         logger.info(
             "Deleting orphan shard from index node",
             extra={
@@ -205,12 +172,6 @@ async def purge_orphan_shards(driver: Driver):
         )
         await node.delete_shard(shard_id)
 
-    for node_id in unavailable_nodes:
-        logger.warning(
-            "Index node has been unavailable while purging. Orphan shards may still exist",
-            extra={"node_id": node_id},
-        )
-
 
 def parse_arguments():
     parser = argparse.ArgumentParser()
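Orphan detection and purging now consult one authority: every indexed shard lives on nidx, so the per-node bookkeeping (`available_nodes_ids`, the `unavailable_nodes` set, and the skip/warn paths) disappears. The new index-side listing in isolation, with a hypothetical stub standing in for `manager.get_nidx_fake_node()` so the sketch runs on its own:

```python
import asyncio
from dataclasses import dataclass

UNKNOWN_KB = "unknown"  # assumption: the module's sentinel for an unresolved KB id


@dataclass
class ShardLocation:
    kbid: str
    node_id: str


class FakeNidxNode:  # hypothetical stand-in for manager.get_nidx_fake_node()
    async def list_shards(self) -> list[str]:
        return ["shard-1", "shard-2"]


async def _get_indexed_shards() -> dict[str, ShardLocation]:
    # Mirrors the + lines above: one listing call, every shard attributed to "nidx"
    nidx = FakeNidxNode()
    shards = await nidx.list_shards()
    return {shard_id: ShardLocation(kbid=UNKNOWN_KB, node_id="nidx") for shard_id in shards}


print(asyncio.run(_get_indexed_shards()))
```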
nucliadb/search/app.py CHANGED
@@ -26,13 +26,10 @@ from starlette.middleware.authentication import AuthenticationMiddleware
 from starlette.requests import ClientDisconnect, Request
 from starlette.responses import HTMLResponse
 
-from nucliadb.common.cluster import manager
-from nucliadb.ingest.settings import DriverConfig
 from nucliadb.middleware import ProcessTimeHeaderMiddleware
 from nucliadb.search import API_PREFIX
 from nucliadb.search.api.v1.router import api as api_v1
 from nucliadb.search.lifecycle import lifespan
-from nucliadb.search.settings import settings
 from nucliadb_telemetry import errors
 from nucliadb_telemetry.fastapi.utils import (
     client_disconnect_handler,
@@ -89,28 +86,8 @@ async def homepage(request: Request) -> HTMLResponse:
     return HTMLResponse("NucliaDB Search Service")
 
 
-async def node_members(request: Request) -> JSONResponse:
-    return JSONResponse(
-        [
-            {
-                "id": node.id,
-                "listen_address": node.address,
-                "type": node.label,
-                "shard_count": node.shard_count,
-                "available_disk": node.available_disk,
-                "dummy": node.dummy,
-                "primary_id": node.primary_id,
-            }
-            for node in manager.get_index_nodes(include_secondary=True)
-        ]
-    )
-
-
 async def alive(request: Request) -> JSONResponse:
-
-        return JSONResponse({"status": "error"}, status_code=503)
-    else:
-        return JSONResponse({"status": "ok"})
+    return JSONResponse({"status": "ok"})
 
 
 async def ready(request: Request) -> JSONResponse:
@@ -122,6 +99,5 @@ async def ready(request: Request) -> JSONResponse:
 
     # Use raw starlette routes to avoid unnecessary overhead
     application.add_route("/", homepage)
-    application.add_route("/node/members", node_members)
     application.add_route("/health/alive", alive)
     application.add_route("/health/ready", ready)
nucliadb/search/search/chat/ask.py CHANGED
@@ -437,14 +437,32 @@ class NotEnoughContextAskResult(AskResult):
         context in the corpus to answer.
         """
         yield self._ndjson_encode(RetrievalAskResponseItem(results=self.main_results))
+        if self.prequeries_results:
+            yield self._ndjson_encode(
+                PrequeriesAskResponseItem(
+                    results={
+                        prequery.id or f"prequery_{index}": prequery_result
+                        for index, (prequery, prequery_result) in enumerate(self.prequeries_results)
+                    }
+                )
+            )
         yield self._ndjson_encode(AnswerAskResponseItem(text=NOT_ENOUGH_CONTEXT_ANSWER))
         status = AnswerStatusCode.NO_RETRIEVAL_DATA
         yield self._ndjson_encode(StatusAskResponseItem(code=status.value, status=status.prettify()))
 
     async def json(self) -> str:
+        prequeries = (
+            {
+                prequery.id or f"prequery_{index}": prequery_result
+                for index, (prequery, prequery_result) in enumerate(self.prequeries_results)
+            }
+            if self.prequeries_results
+            else None
+        )
         return SyncAskResponse(
             answer=NOT_ENOUGH_CONTEXT_ANSWER,
             retrieval_results=self.main_results,
+            prequeries=prequeries,
             status=AnswerStatusCode.NO_RETRIEVAL_DATA.prettify(),
         ).model_dump_json()
 
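This is the only pure addition in the release (+18 -0): `NotEnoughContextAskResult` now reports prequery results on both the streaming (ndjson) and synchronous JSON paths instead of dropping them. Each prequery is keyed by its own id, falling back to a positional key. The fallback in isolation, as a runnable sketch (`Prequery` is a hypothetical minimal stand-in for the real object):

```python
from typing import Any, Optional


class Prequery:  # hypothetical minimal stand-in for the real prequery object
    def __init__(self, id: Optional[str] = None):
        self.id = id


def key_prequeries(prequeries_results: list[tuple[Prequery, Any]]) -> dict[str, Any]:
    # Same comprehension as the added lines: prefer the prequery's id,
    # fall back to its position in the list.
    return {
        prequery.id or f"prequery_{index}": prequery_result
        for index, (prequery, prequery_result) in enumerate(prequeries_results)
    }


results = key_prequeries([(Prequery("entities"), {"hits": 3}), (Prequery(), {"hits": 0})])
assert set(results) == {"entities", "prequery_1"}
```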
nucliadb/standalone/api_router.py CHANGED
@@ -29,7 +29,6 @@ from fastapi_versioning import version
 from jwcrypto import jwe, jwk  # type: ignore
 
 from nucliadb.common import datamanagers
-from nucliadb.common.cluster import manager
 from nucliadb.common.http_clients import processing
 from nucliadb.common.http_clients.auth import NucliaAuthHTTPClient
 from nucliadb.standalone import versions
@@ -103,23 +102,6 @@ def get_temp_access_token(request: Request):
     return JSONResponse({"token": token})
 
 
-@standalone_api_router.get("/cluster/nodes")
-async def node_members(request: Request) -> JSONResponse:
-    return JSONResponse(
-        [
-            {
-                "id": node.id,
-                "listen_address": node.address,
-                "type": node.label,
-                "shard_count": node.shard_count,
-                "available_disk": node.available_disk,
-                "dummy": node.dummy,
-            }
-            for node in manager.get_index_nodes()
-        ]
-    )
-
-
 @standalone_api_router.get("/health/alive")
 async def alive(request: Request) -> JSONResponse:
     return JSONResponse({"status": "ok"})
@@ -127,8 +109,6 @@ async def alive(request: Request) -> JSONResponse:
 
 @standalone_api_router.get("/health/ready")
 async def ready(request: Request) -> JSONResponse:
-    if len(manager.get_index_nodes()) == 0:
-        return JSONResponse({"status": "not ready"}, status_code=503)
     return JSONResponse({"status": "ok"})
 
 
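Same cleanup as in the search app: the standalone `/cluster/nodes` endpoint is removed, and `/health/ready` stops returning 503 when the node registry is empty. Readiness in standalone mode is now unconditional, since there is no cluster membership left to wait for.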
{nucliadb-6.3.1.post3459.dist-info → nucliadb-6.3.1.post3472.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: nucliadb
-Version: 6.3.1.post3459
+Version: 6.3.1.post3472
 Summary: NucliaDB
 Author-email: Nuclia <nucliadb@nuclia.com>
 License: AGPL
@@ -20,11 +20,11 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3 :: Only
 Requires-Python: <4,>=3.9
 Description-Content-Type: text/markdown
-Requires-Dist: nucliadb-telemetry[all]>=6.3.1.post3459
-Requires-Dist: nucliadb-utils[cache,fastapi,storages]>=6.3.1.post3459
-Requires-Dist: nucliadb-protos>=6.3.1.post3459
-Requires-Dist: nucliadb-models>=6.3.1.post3459
-Requires-Dist: nidx-protos>=6.3.1.post3459
+Requires-Dist: nucliadb-telemetry[all]>=6.3.1.post3472
+Requires-Dist: nucliadb-utils[cache,fastapi,storages]>=6.3.1.post3472
+Requires-Dist: nucliadb-protos>=6.3.1.post3472
+Requires-Dist: nucliadb-models>=6.3.1.post3472
+Requires-Dist: nidx-protos>=6.3.1.post3472
 Requires-Dist: nucliadb-admin-assets>=1.0.0.post1224
 Requires-Dist: nuclia-models>=0.24.2
 Requires-Dist: uvicorn

{nucliadb-6.3.1.post3459.dist-info → nucliadb-6.3.1.post3472.dist-info}/RECORD CHANGED
@@ -36,7 +36,7 @@ migrations/pg/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
 nucliadb/__init__.py,sha256=_abCmDJ_0ku483Os4UAjPX7Nywm39cQgAV_DiyjsKeQ,891
 nucliadb/health.py,sha256=UIxxA4oms4HIsCRZM_SZsdkIZIlgzmOxw-qSHLlWuak,3465
 nucliadb/learning_proxy.py,sha256=Gf76qXxjl1lrHEFaCpOUfjjf0ab6eGLNxLMJz3-M_mo,19354
-nucliadb/metrics_exporter.py,sha256=
+nucliadb/metrics_exporter.py,sha256=6u0geEYFxgE5I2Fhl_sxsvGN-ZkaFZNGutSXwrzrsVs,5624
 nucliadb/openapi.py,sha256=wDiw0dVEvTpJvbatkJ0JZLkKm9RItZT5PWRHjqRfqTA,2272
 nucliadb/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nucliadb/common/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
@@ -47,10 +47,10 @@ nucliadb/common/locking.py,sha256=RL0CabZVPzxHZyUjYeUyLvsJTm7W3J9o4fEgsY_ufNc,58
 nucliadb/common/nidx.py,sha256=_LoU8D4afEtlW0c3vGUCoatDZvMr0-2l_GtIGap7VxA,10185
 nucliadb/common/cluster/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
 nucliadb/common/cluster/base.py,sha256=kklDqyvsubNX0W494ttl9f3E58lGaX6AXqAd8XX8ZHE,5522
-nucliadb/common/cluster/exceptions.py,sha256=
+nucliadb/common/cluster/exceptions.py,sha256=t7v_l93t44l2tQpdQXgO_w-c4YZRcaayOz1A2i0w4RQ,1258
 nucliadb/common/cluster/grpc_node_dummy.py,sha256=L85wBnfab7Rev0CfsfUjPxQC6DiHPsETKrZAOLx9XHg,3510
 nucliadb/common/cluster/index_node.py,sha256=g38H1kiAliF3Y6et_CWYInpn_xPxf7THAFJ7RtgLNZo,3246
-nucliadb/common/cluster/manager.py,sha256
+nucliadb/common/cluster/manager.py,sha256=TMPPfR_41922JiuGpKL5iqNBoZGmv8agfYxlnRQIVss,12726
 nucliadb/common/cluster/rebalance.py,sha256=cLUlR08SsqmnoA_9GDflV6k2tXmkAPpyFxZErzp45vo,8754
 nucliadb/common/cluster/rollover.py,sha256=iTJ9EQmHbzXL34foNFto-hqdC0Kq1pF1mNxqv0jqhBs,25362
 nucliadb/common/cluster/settings.py,sha256=TMoym-cZsQ2soWfLAce0moSa2XncttQyhahL43LrWTo,3384
@@ -117,7 +117,7 @@ nucliadb/ingest/consumer/consumer.py,sha256=YIfkUmBqKeyAkJU1y1Vlld4pwFAKDdC_sX-s
 nucliadb/ingest/consumer/materializer.py,sha256=7ofLbwjldJA8TWXDRZRM4U5EviZt3qNSQ8oadmkzS0Y,3840
 nucliadb/ingest/consumer/metrics.py,sha256=ji1l_4cKiHJthQd8YNem1ft4iMbw9KThmVvJmLcv3Xg,1075
 nucliadb/ingest/consumer/pull.py,sha256=EYT0ImngMQgatStG68p2GSrPQBbJxeuq8nFm8DdAbwk,9280
-nucliadb/ingest/consumer/service.py,sha256=
+nucliadb/ingest/consumer/service.py,sha256=BLM_dmKZkFBsYl3sj4MZZp5M3kkxHLuO7sE18PqIatw,6538
 nucliadb/ingest/consumer/shard_creator.py,sha256=8SotMc-o_G8XZU52gR4Aay7tcigTdIXgz8YtxqHmJ1Q,4309
 nucliadb/ingest/consumer/utils.py,sha256=jpX8D4lKzuPCpArQLZeX_Zczq3pfen_zAf8sPJfOEZU,2642
 nucliadb/ingest/fields/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
@@ -145,7 +145,7 @@ nucliadb/ingest/orm/processor/processor.py,sha256=2FxAetUvtHvg6l-24xYrmBdsyqc0RU
 nucliadb/ingest/orm/processor/sequence_manager.py,sha256=uqEphtI1Ir_yk9jRl2gPf7BlzzXWovbARY5MNZSBI_8,1704
 nucliadb/ingest/service/__init__.py,sha256=MME_G_ERxzJR6JW_hfE2qcfXpmpH1kdG-S0a-M0qRm8,2043
 nucliadb/ingest/service/exceptions.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
-nucliadb/ingest/service/writer.py,sha256=
+nucliadb/ingest/service/writer.py,sha256=wg_2Th9vMDuYolHe4GWwmiKO-Ovbs-ALF08Hp18iF78,20363
 nucliadb/middleware/__init__.py,sha256=A8NBlBuEkunCFMKpR9gnfNELsVn0Plc55BIQMbWDM8Q,2202
 nucliadb/migrator/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
 nucliadb/migrator/command.py,sha256=dKbJ1tAmP6X4lMVRSSlz351euaqs2wBPpOczLjATUes,2089
@@ -159,7 +159,7 @@ nucliadb/migrator/utils.py,sha256=NgUreUvON8_nWEzTxELBMWlfV7E6-6qi-g0DMEbVEz4,28
 nucliadb/models/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
 nucliadb/models/responses.py,sha256=qnuOoc7TrVSUnpikfTwHLKez47_DE4mSFzpxrwtqijA,1599
 nucliadb/purge/__init__.py,sha256=BphuNvsJ1aSwuVXUcSOaK4nj9pDcpuKRBf_QAcoRj-A,11787
-nucliadb/purge/orphan_shards.py,sha256=
+nucliadb/purge/orphan_shards.py,sha256=AU1Jfc4qtQFasB6dkuGz0a_Zjs-i7liUTf7Xpl8R8ng,7939
 nucliadb/reader/__init__.py,sha256=C5Efic7WlGm2U2C5WOyquMFbIj2Pojwe_8mwzVYnOzE,1304
 nucliadb/reader/app.py,sha256=Se-BFTE6d1v1msLzQn4q5XIhjnSxa2ckDSHdvm7NRf8,3096
 nucliadb/reader/lifecycle.py,sha256=5jYyzMD1tpIh-OYbQoNMjKZ0-3D9KFnULa3B_Vf2xyY,1740
@@ -180,7 +180,7 @@ nucliadb/reader/api/v1/vectorsets.py,sha256=insTwaykshz442cMKa2VP74wJwvZrIYi0U7M
 nucliadb/reader/reader/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
 nucliadb/reader/reader/notifications.py,sha256=HVZNUlfbSuoZ9BsSs8wmzPeYurl0U0O2ooVlR9KSM3U,7792
 nucliadb/search/__init__.py,sha256=tnypbqcH4nBHbGpkINudhKgdLKpwXQCvDtPchUlsyY4,1511
-nucliadb/search/app.py,sha256
+nucliadb/search/app.py,sha256=-WEX1AZRA8R_9aeOo9ovOTwjXW_7VfwWN7N2ccSoqXg,3387
 nucliadb/search/lifecycle.py,sha256=DW8v4WUi4rZqc7xTOi3rE67W7877WG7fH9oTZbolHdE,2099
 nucliadb/search/openapi.py,sha256=t3Wo_4baTrfPftg2BHsyLWNZ1MYn7ZRdW7ht-wFOgRs,1016
 nucliadb/search/predict.py,sha256=z2-RkhMkH-5T6PtFkfESxNof07XiS5FxicLHPRyCUXc,22284
@@ -232,7 +232,7 @@ nucliadb/search/search/shards.py,sha256=JSRSrHgHcF4sXyuZZoJdMfK0v_LHpoSRf1lCr5-K
 nucliadb/search/search/summarize.py,sha256=ksmYPubEQvAQgfPdZHfzB_rR19B2ci4IYZ6jLdHxZo8,4996
 nucliadb/search/search/utils.py,sha256=iF2tbBA56gRMJH1TlE2hMrqeXqjoeOPt4KgRdp2m9Ek,3313
 nucliadb/search/search/chat/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
-nucliadb/search/search/chat/ask.py,sha256=
+nucliadb/search/search/chat/ask.py,sha256=olZT08JVo3ZGDsDXkjvI2JTlqQln_o91HJzv0TKFQ7I,37244
 nucliadb/search/search/chat/exceptions.py,sha256=Siy4GXW2L7oPhIR86H3WHBhE9lkV4A4YaAszuGGUf54,1356
 nucliadb/search/search/chat/images.py,sha256=PA8VWxT5_HUGfW1ULhKTK46UBsVyINtWWqEM1ulzX1E,3095
 nucliadb/search/search/chat/prompt.py,sha256=Jnja-Ss7skgnnDY8BymVfdeYsFPnIQFL8tEvcRXTKUE,47356
@@ -246,7 +246,7 @@ nucliadb/search/search/query_parser/models.py,sha256=VHDuyJlU2OLZN1usrQX53TZbPmW
 nucliadb/search/search/query_parser/old_filters.py,sha256=-zbfN-RsXoj_DRjh3Lfp-wShwFXgkISawzVptVzja-A,9071
 nucliadb/search/search/query_parser/parser.py,sha256=9TwkSNna3s-lCQIqBoSJzm6YbXdu8VIHJUan8M4ysfE,4667
 nucliadb/standalone/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
-nucliadb/standalone/api_router.py,sha256=
+nucliadb/standalone/api_router.py,sha256=hgq9FXpihzgjHkwcVGfGCSwyXy67fqXTfLFHuINzIi0,5567
 nucliadb/standalone/app.py,sha256=mAApNK_iVsQgJyd-mtwCeZq5csSimwnXmlQGH9a70pE,5586
 nucliadb/standalone/auth.py,sha256=UwMv-TywhMZabvVg3anQLeCRdoHDnWf2o3luvnoNBjs,7670
 nucliadb/standalone/config.py,sha256=g9JBJQfyw87TYZ3yuy0O9WFVLd_MmCJxSRSI0E8FwZE,5396
@@ -268,7 +268,7 @@ nucliadb/tasks/producer.py,sha256=_aTEulet1ebGhQMhoD3SlfHtfhZHNCbBfOf8uKyNpWk,35
 nucliadb/tasks/registry.py,sha256=tKvv_Py_O3peuLshq4bB3w197E33O7P7B63_6loLMrA,1753
 nucliadb/tasks/utils.py,sha256=abXcE_bjAkO4UwrKXhZykFPsRwbtwnuc0reNWhYImjw,1360
 nucliadb/tests/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
-nucliadb/tests/config.py,sha256=
+nucliadb/tests/config.py,sha256=JN_Jhgj-fwM9_8IeO9pwxr6C1PiwRDrXxm67Y38rU30,2080
 nucliadb/tests/vectors.py,sha256=CcNKx-E8LPpyvRyljbmb-Tn_wST9Juw2CBoogWrKiTk,62843
 nucliadb/train/__init__.py,sha256=NVwe5yULoHXb80itIJT8YJYEz2xbiOPQ7_OMys6XJw8,1301
 nucliadb/train/app.py,sha256=TiRttTvekLuZdIvi46E4HyuumDTkR4G4Luqq3fEdjes,2824
@@ -340,8 +340,8 @@ nucliadb/writer/tus/local.py,sha256=7jYa_w9b-N90jWgN2sQKkNcomqn6JMVBOVeDOVYJHto,
 nucliadb/writer/tus/s3.py,sha256=vF0NkFTXiXhXq3bCVXXVV-ED38ECVoUeeYViP8uMqcU,8357
 nucliadb/writer/tus/storage.py,sha256=ToqwjoYnjI4oIcwzkhha_MPxi-k4Jk3Lt55zRwaC1SM,2903
 nucliadb/writer/tus/utils.py,sha256=MSdVbRsRSZVdkaum69_0wku7X3p5wlZf4nr6E0GMKbw,2556
-nucliadb-6.3.1.
-nucliadb-6.3.1.
-nucliadb-6.3.1.
-nucliadb-6.3.1.
-nucliadb-6.3.1.
+nucliadb-6.3.1.post3472.dist-info/METADATA,sha256=i8NumTytCL5x5Ht3oyaFDYDBa3Fa7JVz7B10ni6OHMc,4291
+nucliadb-6.3.1.post3472.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+nucliadb-6.3.1.post3472.dist-info/entry_points.txt,sha256=XqGfgFDuY3zXQc8ewXM2TRVjTModIq851zOsgrmaXx4,1268
+nucliadb-6.3.1.post3472.dist-info/top_level.txt,sha256=hwYhTVnX7jkQ9gJCkVrbqEG1M4lT2F_iPQND1fCzF80,20
+nucliadb-6.3.1.post3472.dist-info/RECORD,,
File without changes
|
File without changes
|
File without changes
|