nucliadb: 6.7.2.post4911-py3-none-any.whl → 6.7.2.post4917-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: the registry flags this version of nucliadb as a potentially problematic release.

@@ -447,6 +447,10 @@ class DummyPredictEngine(PredictEngine):
         self.cluster_url = "http://localhost:8000"
         self.public_url = "http://localhost:8000"
         self.calls = []
+        self.ndjson_reasoning = [
+            b'{"chunk": {"type": "reasoning", "text": "dummy "}}\n',
+            b'{"chunk": {"type": "reasoning", "text": "reasoning"}}\n',
+        ]
         self.ndjson_answer = [
             b'{"chunk": {"type": "text", "text": "valid "}}\n',
             b'{"chunk": {"type": "text", "text": "answer "}}\n',
@@ -482,8 +486,11 @@ class DummyPredictEngine(PredictEngine):
         self.calls.append(("chat_query_ndjson", item))

         async def generate():
-            for item in self.ndjson_answer:
-                yield GenerativeChunk.model_validate_json(item)
+            if item.reasoning is not False:
+                for chunk in self.ndjson_reasoning:
+                    yield GenerativeChunk.model_validate_json(chunk)
+            for chunk in self.ndjson_answer:
+                yield GenerativeChunk.model_validate_json(chunk)

         return (DUMMY_LEARNING_ID, DUMMY_LEARNING_MODEL, generate())

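The two hunks above touch the dummy predict engine (apparently nucliadb/search/predict.py, judging by the RECORD changes at the end of this diff): it now emits a pair of "reasoning" NDJSON lines before the answer lines unless the request explicitly disables reasoning (the `item.reasoning is not False` check). A minimal sketch, not code from the package, of how one of those NDJSON lines parses with the same call the dummy engine uses:

    from nuclia_models.predict.generative_responses import GenerativeChunk

    line = b'{"chunk": {"type": "reasoning", "text": "dummy "}}\n'
    chunk = GenerativeChunk.model_validate_json(line).chunk
    # A "reasoning" line should parse into a ReasoningGenerativeResponse with .text == "dummy ";
    # the "text" lines in ndjson_answer parse into TextGenerativeResponse the same way.
    print(type(chunk).__name__, chunk.text)
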
@@ -20,7 +20,7 @@
 import dataclasses
 import functools
 import json
-from typing import AsyncGenerator, Optional, cast
+from typing import AsyncGenerator, Optional, Union, cast

 from nuclia_models.common.consumption import Consumption
 from nuclia_models.predict.generative_responses import (
@@ -28,6 +28,7 @@ from nuclia_models.predict.generative_responses import (
     GenerativeChunk,
     JSONGenerativeResponse,
     MetaGenerativeResponse,
+    ReasoningGenerativeResponse,
     StatusGenerativeResponse,
     TextGenerativeResponse,
 )
@@ -102,6 +103,7 @@ from nucliadb_models.search import (
     PromptContext,
     PromptContextOrder,
     RagStrategyName,
+    ReasoningAskResponseItem,
     Relations,
     RelationsAskResponseItem,
     RetrievalAskResponseItem,
@@ -167,6 +169,7 @@ class AskResult:

         # Computed from the predict chat answer stream
         self._answer_text = ""
+        self._reasoning_text: Optional[str] = None
         self._object: Optional[JSONGenerativeResponse] = None
         self._status: Optional[StatusGenerativeResponse] = None
         self._citations: Optional[CitationsGenerativeResponse] = None
@@ -220,12 +223,23 @@ class AskResult:
     async def _stream(self) -> AsyncGenerator[AskResponseItemType, None]:
         # First, stream out the predict answer
         first_chunk_yielded = False
+        first_reasoning_chunk_yielded = False
         with self.metrics.time("stream_predict_answer"):
             async for answer_chunk in self._stream_predict_answer_text():
-                yield AnswerAskResponseItem(text=answer_chunk)
-                if not first_chunk_yielded:
-                    self.metrics.record_first_chunk_yielded()
-                    first_chunk_yielded = True
+                if isinstance(answer_chunk, TextGenerativeResponse):
+                    yield AnswerAskResponseItem(text=answer_chunk.text)
+                    if not first_chunk_yielded:
+                        self.metrics.record_first_chunk_yielded()
+                        first_chunk_yielded = True
+                elif isinstance(answer_chunk, ReasoningGenerativeResponse):
+                    yield ReasoningAskResponseItem(text=answer_chunk.text)
+                    if not first_reasoning_chunk_yielded:
+                        self.metrics.record_first_reasoning_chunk_yielded()
+                        first_reasoning_chunk_yielded = True
+                else:
+                    # This is a trick so mypy generates an error if this branch can be reached,
+                    # that is, if we are missing some ifs
+                    _a: int = "a"

         if self._object is not None:
             yield JSONAskResponseItem(object=self._object.object)
@@ -274,8 +288,10 @@ class AskResult:
             audit_answer = json.dumps(self._object.object).encode("utf-8")
         self.auditor.audit(
             text_answer=audit_answer,
+            text_reasoning=self._reasoning_text,
             generative_answer_time=self.metrics["stream_predict_answer"],
             generative_answer_first_chunk_time=self.metrics.get_first_chunk_time() or 0,
+            generative_reasoning_first_chunk_time=self.metrics.get_first_reasoning_chunk_time(),
             rephrase_time=self.metrics.get("rephrase"),
             status_code=self.status_code,
         )
@@ -384,6 +400,7 @@ class AskResult:

         response = SyncAskResponse(
             answer=self._answer_text,
+            reasoning=self._reasoning_text,
             answer_json=answer_json,
             status=self.status_code.prettify(),
             relations=self._relations,
@@ -420,7 +437,9 @@ class AskResult:
             )
         return self._relations

-    async def _stream_predict_answer_text(self) -> AsyncGenerator[str, None]:
+    async def _stream_predict_answer_text(
+        self,
+    ) -> AsyncGenerator[Union[TextGenerativeResponse, ReasoningGenerativeResponse], None]:
         """
         Reads the stream of the generative model, yielding the answer text but also parsing
         other items like status codes, citations and miscellaneous metadata.
@@ -435,7 +454,13 @@ class AskResult:
             item = generative_chunk.chunk
             if isinstance(item, TextGenerativeResponse):
                 self._answer_text += item.text
-                yield item.text
+                yield item
+            elif isinstance(item, ReasoningGenerativeResponse):
+                if self._reasoning_text is None:
+                    self._reasoning_text = item.text
+                else:
+                    self._reasoning_text += item.text
+                yield item
             elif isinstance(item, JSONGenerativeResponse):
                 self._object = item
             elif isinstance(item, StatusGenerativeResponse):
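With this change, _stream_predict_answer_text yields whole chunk objects (TextGenerativeResponse or ReasoningGenerativeResponse) instead of bare strings, and _stream branches on the chunk type to emit AnswerAskResponseItem or ReasoningAskResponseItem and to record separate first-chunk timings. A rough consumer sketch under that contract, not code from the package (it reuses the imports from the hunks above and calls a private method purely for illustration):

    async def collect(ask_result: AskResult) -> tuple[str, str]:
        # Accumulate answer text and reasoning text from the Union-typed generator.
        answer, reasoning = "", ""
        async for chunk in ask_result._stream_predict_answer_text():
            if isinstance(chunk, TextGenerativeResponse):
                answer += chunk.text
            else:  # ReasoningGenerativeResponse
                reasoning += chunk.text
        return answer, reasoning
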
@@ -559,11 +584,13 @@ async def ask(
             origin=origin,
             generative_answer_time=0,
             generative_answer_first_chunk_time=0,
+            generative_reasoning_first_chunk_time=None,
             rephrase_time=metrics.get("rephrase"),
             user_query=user_query,
             rephrased_query=rephrased_query,
             retrieval_rephrase_query=err.main_query.rephrased_query if err.main_query else None,
             text_answer=b"",
+            text_reasoning=None,
             status_code=AnswerStatusCode.NO_RETRIEVAL_DATA,
             chat_history=chat_history,
             query_context={},
@@ -625,6 +652,7 @@ async def ask(
         json_schema=ask_request.answer_json_schema,
         rerank_context=False,
         top_k=ask_request.top_k,
+        reasoning=ask_request.reasoning,
     )

     nuclia_learning_id = None
@@ -308,11 +308,13 @@ def maybe_audit_chat(
     origin: str,
     generative_answer_time: float,
     generative_answer_first_chunk_time: float,
+    generative_reasoning_first_chunk_time: Optional[float],
     rephrase_time: Optional[float],
     user_query: str,
     rephrased_query: Optional[str],
     retrieval_rephrase_query: Optional[str],
     text_answer: bytes,
+    text_reasoning: Optional[str],
     status_code: AnswerStatusCode,
     chat_history: list[ChatContextMessage],
     query_context: PromptContext,
@@ -344,12 +346,14 @@ def maybe_audit_chat(
         question=user_query,
         generative_answer_time=generative_answer_time,
         generative_answer_first_chunk_time=generative_answer_first_chunk_time,
+        generative_reasoning_first_chunk_time=generative_reasoning_first_chunk_time,
         rephrase_time=rephrase_time,
         rephrased_question=rephrased_query,
         retrieval_rephrased_question=retrieval_rephrase_query,
         chat_context=chat_history_context,
         retrieved_context=chat_retrieved_context,
         answer=audit_answer,
+        reasoning=text_reasoning,
         learning_id=learning_id,
         status_code=int(status_code.value),
         model=model,
@@ -401,8 +405,10 @@ class ChatAuditor:
     def audit(
         self,
         text_answer: bytes,
+        text_reasoning: Optional[str],
         generative_answer_time: float,
         generative_answer_first_chunk_time: float,
+        generative_reasoning_first_chunk_time: Optional[float],
         rephrase_time: Optional[float],
         status_code: AnswerStatusCode,
     ):
@@ -416,8 +422,10 @@ class ChatAuditor:
            retrieval_rephrase_query=self.retrieval_rephrased_query,
            generative_answer_time=generative_answer_time,
            generative_answer_first_chunk_time=generative_answer_first_chunk_time,
+           generative_reasoning_first_chunk_time=generative_reasoning_first_chunk_time,
            rephrase_time=rephrase_time,
            text_answer=text_answer,
+           text_reasoning=text_reasoning,
            status_code=status_code,
            chat_history=self.chat_history,
            query_context=self.query_context,
@@ -49,6 +49,10 @@ buckets = [
 ]

 generative_first_chunk_histogram = metrics.Histogram(
+    name="generative_reasoning_first_chunk",
+    buckets=buckets,
+)
+reasoning_first_chunk_histogram = metrics.Histogram(
     name="generative_first_chunk",
     buckets=buckets,
 )
@@ -107,12 +111,24 @@ class AskMetrics(Metrics):
         super().__init__(id="ask")
         self.global_start = time.monotonic()
         self.first_chunk_yielded_at: Optional[float] = None
+        self.first_reasoning_chunk_yielded_at: Optional[float] = None

     def record_first_chunk_yielded(self):
         self.first_chunk_yielded_at = time.monotonic()
         generative_first_chunk_histogram.observe(self.first_chunk_yielded_at - self.global_start)

+    def record_first_reasoning_chunk_yielded(self):
+        self.first_reasoning_chunk_yielded_at = time.monotonic()
+        reasoning_first_chunk_histogram.observe(
+            self.first_reasoning_chunk_yielded_at - self.global_start
+        )
+
     def get_first_chunk_time(self) -> Optional[float]:
         if self.first_chunk_yielded_at is None:
             return None
         return self.first_chunk_yielded_at - self.global_start
+
+    def get_first_reasoning_chunk_time(self) -> Optional[float]:
+        if self.first_reasoning_chunk_yielded_at is None:
+            return None
+        return self.first_reasoning_chunk_yielded_at - self.global_start
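The metrics hunks above (nucliadb/search/search/metrics.py, per the RECORD changes) add a reasoning counterpart to the existing time-to-first-chunk measurement: the delta is taken with time.monotonic() from AskMetrics construction to the first reasoning chunk, and the getter returns None when no reasoning was streamed. A usage sketch mirroring the calls in the diff (assumed, not code from the package):

    metrics = AskMetrics()
    # ... once the first reasoning chunk of the generative stream arrives ...
    metrics.record_first_reasoning_chunk_yielded()
    # Seconds since AskMetrics() was created, or None if no reasoning chunk was ever recorded.
    ttfr = metrics.get_first_reasoning_chunk_time()
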
@@ -28,6 +28,7 @@ from multidict import CIMultiDictProxy
 from nuclia_models.predict.generative_responses import (
     GenerativeChunk,
     JSONGenerativeResponse,
+    ReasoningGenerativeResponse,
     StatusGenerativeResponse,
     TextGenerativeResponse,
 )
@@ -140,8 +141,10 @@ async def predict_proxy(
         client_type=client_type,
         origin=origin,
         text_answer=content,
+        text_reasoning=None,
         generative_answer_time=metrics[PREDICT_ANSWER_METRIC],
         generative_answer_first_chunk_time=None,
+        generative_reasoning_first_chunk_time=None,
         status_code=AnswerStatusCode(str(llm_status_code)),
     )

@@ -173,23 +176,33 @@ async def chat_streaming_generator(
     is_json: bool,
 ):
     first = True
+    first_reasoning = True
     status_code = AnswerStatusCode.ERROR.value
     text_answer = ""
+    text_reasoning = ""
     json_object = None
     metrics = AskMetrics()
     with metrics.time(PREDICT_ANSWER_METRIC):
         async for chunk in predict_response.content:
-            if first:
-                metrics.record_first_chunk_yielded()
-                first = False
-
             yield chunk

             if is_json:
                 try:
                     parsed_chunk = GenerativeChunk.model_validate_json(chunk).chunk
+                    if first and isinstance(
+                        parsed_chunk,
+                        (TextGenerativeResponse, JSONGenerativeResponse, StatusGenerativeResponse),
+                    ):
+                        metrics.record_first_chunk_yielded()
+                        first = False
+
                     if isinstance(parsed_chunk, TextGenerativeResponse):
                         text_answer += parsed_chunk.text
+                    elif isinstance(parsed_chunk, ReasoningGenerativeResponse):
+                        if first_reasoning:
+                            metrics.record_first_reasoning_chunk_yielded()
+                            first_reasoning = False
+                        text_reasoning += parsed_chunk.text
                     elif isinstance(parsed_chunk, JSONGenerativeResponse):
                         json_object = parsed_chunk.object
                     elif isinstance(parsed_chunk, StatusGenerativeResponse):
@@ -218,8 +231,10 @@ async def chat_streaming_generator(
         client_type=client_type,
         origin=origin,
         text_answer=text_answer.encode() if json_object is None else json.dumps(json_object).encode(),
+        text_reasoning=text_reasoning if text_reasoning else None,
         generative_answer_time=metrics[PREDICT_ANSWER_METRIC],
         generative_answer_first_chunk_time=metrics.get_first_chunk_time(),
+        generative_reasoning_first_chunk_time=metrics.get_first_reasoning_chunk_time(),
         status_code=AnswerStatusCode(status_code),
     )

@@ -232,8 +247,10 @@ def audit_predict_proxy_endpoint(
     client_type: NucliaDBClientType,
     origin: str,
     text_answer: bytes,
+    text_reasoning: Optional[str],
     generative_answer_time: float,
     generative_answer_first_chunk_time: Optional[float],
+    generative_reasoning_first_chunk_time: Optional[float],
     status_code: AnswerStatusCode,
 ):
     maybe_audit_chat(
@@ -250,8 +267,10 @@ def audit_predict_proxy_endpoint(
         query_context_order={},
         model=headers.get(NUCLIA_LEARNING_MODEL_HEADER),
         text_answer=text_answer,
+        text_reasoning=text_reasoning,
         generative_answer_time=generative_answer_time,
         generative_answer_first_chunk_time=generative_answer_first_chunk_time or 0,
+        generative_reasoning_first_chunk_time=generative_reasoning_first_chunk_time,
         rephrase_time=None,
         status_code=status_code,
     )
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nucliadb
-Version: 6.7.2.post4911
+Version: 6.7.2.post4917
 Summary: NucliaDB
 Author-email: Nuclia <nucliadb@nuclia.com>
 License-Expression: AGPL-3.0-or-later
@@ -19,13 +19,13 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3 :: Only
 Requires-Python: <4,>=3.9
 Description-Content-Type: text/markdown
-Requires-Dist: nucliadb-telemetry[all]>=6.7.2.post4911
-Requires-Dist: nucliadb-utils[cache,fastapi,storages]>=6.7.2.post4911
-Requires-Dist: nucliadb-protos>=6.7.2.post4911
-Requires-Dist: nucliadb-models>=6.7.2.post4911
-Requires-Dist: nidx-protos>=6.7.2.post4911
+Requires-Dist: nucliadb-telemetry[all]>=6.7.2.post4917
+Requires-Dist: nucliadb-utils[cache,fastapi,storages]>=6.7.2.post4917
+Requires-Dist: nucliadb-protos>=6.7.2.post4917
+Requires-Dist: nucliadb-models>=6.7.2.post4917
+Requires-Dist: nidx-protos>=6.7.2.post4917
 Requires-Dist: nucliadb-admin-assets>=1.0.0.post1224
-Requires-Dist: nuclia-models>=0.46.0
+Requires-Dist: nuclia-models>=0.47.0
 Requires-Dist: uvicorn[standard]
 Requires-Dist: argdantic
 Requires-Dist: aiohttp>=3.11.11
@@ -217,7 +217,7 @@ nucliadb/search/__init__.py,sha256=tnypbqcH4nBHbGpkINudhKgdLKpwXQCvDtPchUlsyY4,1
 nucliadb/search/app.py,sha256=-WEX1AZRA8R_9aeOo9ovOTwjXW_7VfwWN7N2ccSoqXg,3387
 nucliadb/search/lifecycle.py,sha256=hiylV-lxsAWkqTCulXBg0EIfMQdejSr8Zar0L_GLFT8,2218
 nucliadb/search/openapi.py,sha256=t3Wo_4baTrfPftg2BHsyLWNZ1MYn7ZRdW7ht-wFOgRs,1016
-nucliadb/search/predict.py,sha256=PdcJz--hK884GTTZ4m_QldR6dfNuI0vHpUWumJ2NGoA,23207
+nucliadb/search/predict.py,sha256=7AtA5K0YBydjUwP0mcMox66CZMI250_QBIOm6XVXxJs,23559
 nucliadb/search/predict_models.py,sha256=ozuQZGWAK7v8W6UOk3xXQ_zW7YUNtRy_4l5LE5BuT_A,8172
 nucliadb/search/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nucliadb/search/run.py,sha256=aFb-CXRi_C8YMpP_ivNj8KW1BYhADj88y8K9Lr_nUPI,1402
@@ -257,9 +257,9 @@ nucliadb/search/search/graph_strategy.py,sha256=VNa-XLQnJometUTbDJumhHf_LUHphHBo
 nucliadb/search/search/hydrator.py,sha256=CL8XYq5QMSL0hvOND7XnHcJrM-BeVRvpwN5lL55yAD0,6963
 nucliadb/search/search/ingestion_agents.py,sha256=IK6yOPEF9rST_uoqspdVdPk0pldjDhc8Fz1eRovnySI,3311
 nucliadb/search/search/merge.py,sha256=XiRBsxhYPshPV7lZXD-9E259KZOPIf4I2tKosY0lPo4,22470
-nucliadb/search/search/metrics.py,sha256=3I6IN0qDSmqIvUaWJmT3rt-Jyjs6LcvnKI8ZqCiuJPY,3501
+nucliadb/search/search/metrics.py,sha256=yodhoyn-smFdS7rKUn_XXNNXT93WqVdZj9F3TCbbQTI,4160
 nucliadb/search/search/paragraphs.py,sha256=pNAEiYqJGGUVcEf7xf-PFMVqz0PX4Qb-WNG-_zPGN2o,7799
-nucliadb/search/search/predict_proxy.py,sha256=Df8F5K-oS4TIXJc_y8UDViJTo7st5L0kMgxYPFZ39Vk,8806
+nucliadb/search/search/predict_proxy.py,sha256=bU18TKE7GzSt8Hqr38jE-2Lz_GPJRTFTGsFvMdPZWIM,9872
 nucliadb/search/search/query.py,sha256=lYCesbUv-B7IyVFQoCCurcxl_Azc5nq3jtVQJ9tk1Ao,11552
 nucliadb/search/search/rank_fusion.py,sha256=xZtXhbmKb_56gs73u6KkFm2efvTATOSMmpOV2wrAIqE,9613
 nucliadb/search/search/rerankers.py,sha256=2LNC0I28EXriffMuBlOYzjQq0vCTjpCxaK29f852n3s,7473
@@ -267,11 +267,11 @@ nucliadb/search/search/shards.py,sha256=mc5DK-MoCv9AFhlXlOFHbPvetcyNDzTFOJ5rimK8
 nucliadb/search/search/summarize.py,sha256=3lLdwsM28W505bKvmK7JLXmz7kcjd8Hp70LQs391ofY,5087
 nucliadb/search/search/utils.py,sha256=ajRIXfdTF67dBVahQCXW-rSv6gJpUMPt3QhJrWqArTQ,2175
 nucliadb/search/search/chat/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
-nucliadb/search/search/chat/ask.py,sha256=c8gvGkspW5KUbDhVzcUfd7eF7SMfqzwKfCPidJniRLI,39417
+nucliadb/search/search/chat/ask.py,sha256=Akq5nCl-0P9Hcil2hzuOEIlhEA9IxJtlQo4dz54BG3s,40884
 nucliadb/search/search/chat/exceptions.py,sha256=Siy4GXW2L7oPhIR86H3WHBhE9lkV4A4YaAszuGGUf54,1356
 nucliadb/search/search/chat/images.py,sha256=PA8VWxT5_HUGfW1ULhKTK46UBsVyINtWWqEM1ulzX1E,3095
 nucliadb/search/search/chat/prompt.py,sha256=Bk69WVki5XIzXFYO6o1uQw1feHtuMwfEx_A1TT3piR0,54136
-nucliadb/search/search/chat/query.py,sha256=AhOPMf68p2BRjKz7CdkcUIDMANtxr00oGt42iKUUjAw,16698
+nucliadb/search/search/chat/query.py,sha256=qWrwVEX_GrDV7LFRyC21BURtl-WsRt8BkIocY8njkKM,17147
 nucliadb/search/search/query_parser/__init__.py,sha256=cp15ZcFnHvpcu_5-aK2A4uUyvuZVV_MJn4bIXMa20ks,835
 nucliadb/search/search/query_parser/exceptions.py,sha256=sVl9gRNzhE-s480LBBVkiXzNRbKhYRQN5F3it5tNNp8,939
 nucliadb/search/search/query_parser/fetcher.py,sha256=0Eg_7x9BaAQ1AuTK6NXQMUoGFAXIZiMRurR32tydeNM,17198
@@ -379,8 +379,8 @@ nucliadb/writer/tus/local.py,sha256=7jYa_w9b-N90jWgN2sQKkNcomqn6JMVBOVeDOVYJHto,
 nucliadb/writer/tus/s3.py,sha256=vu1BGg4VqJ_x2P1u2BxqPKlSfw5orT_a3R-Ln5oPUpU,8483
 nucliadb/writer/tus/storage.py,sha256=ToqwjoYnjI4oIcwzkhha_MPxi-k4Jk3Lt55zRwaC1SM,2903
 nucliadb/writer/tus/utils.py,sha256=MSdVbRsRSZVdkaum69_0wku7X3p5wlZf4nr6E0GMKbw,2556
-nucliadb-6.7.2.post4911.dist-info/METADATA,sha256=sWlsMPT1JH0UwpkUlpsNhj5RYdZyyd77MzrsmFh7fcc,4158
-nucliadb-6.7.2.post4911.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-nucliadb-6.7.2.post4911.dist-info/entry_points.txt,sha256=XqGfgFDuY3zXQc8ewXM2TRVjTModIq851zOsgrmaXx4,1268
-nucliadb-6.7.2.post4911.dist-info/top_level.txt,sha256=hwYhTVnX7jkQ9gJCkVrbqEG1M4lT2F_iPQND1fCzF80,20
-nucliadb-6.7.2.post4911.dist-info/RECORD,,
+nucliadb-6.7.2.post4917.dist-info/METADATA,sha256=J1t2FzIphJvmbhKEK2lJG0Xy-MC85goxjoTLD4lod4Q,4158
+nucliadb-6.7.2.post4917.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nucliadb-6.7.2.post4917.dist-info/entry_points.txt,sha256=XqGfgFDuY3zXQc8ewXM2TRVjTModIq851zOsgrmaXx4,1268
+nucliadb-6.7.2.post4917.dist-info/top_level.txt,sha256=hwYhTVnX7jkQ9gJCkVrbqEG1M4lT2F_iPQND1fCzF80,20
+nucliadb-6.7.2.post4917.dist-info/RECORD,,