nv-ingest-api 2025.10.30.dev20251030__py3-none-any.whl → 2025.10.31.dev20251031__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of nv-ingest-api might be problematic.

@@ -43,6 +43,24 @@ class PdfConfigSchema(BaseModelNoExt):
     split_page_count: Annotated[int, Field(ge=1)] = 32


+class RoutingOptionsSchema(BaseModelNoExt):
+    # Queue routing hint for QoS scheduler
+    queue_hint: Optional[str] = None
+
+    @field_validator("queue_hint")
+    @classmethod
+    def validate_queue_hint(cls, v):
+        if v is None:
+            return v
+        if not isinstance(v, str):
+            raise ValueError("queue_hint must be a string")
+        s = v.lower()
+        allowed = {"default", "immediate", "micro", "small", "medium", "large"}
+        if s not in allowed:
+            raise ValueError("queue_hint must be one of: default, immediate, micro, small, medium, large")
+        return s
+
+
 # Ingest Task Schemas


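The added RoutingOptionsSchema lower-cases queue_hint and restricts it to a fixed set of queue names. A minimal usage sketch of that validation (the import path is assumed from the ingest_job_schema.py entry in the RECORD further down; values are illustrative):

    # Hedged sketch: import path assumed from the RECORD entry for ingest_job_schema.py.
    from nv_ingest_api.internal.schemas.meta.ingest_job_schema import RoutingOptionsSchema

    opts = RoutingOptionsSchema(queue_hint="Micro")  # mixed case is accepted...
    assert opts.queue_hint == "micro"                # ...and normalized to lower case

    RoutingOptionsSchema(queue_hint=None)            # None means "no hint"

    try:
        RoutingOptionsSchema(queue_hint="huge")      # not in the allowed set
    except ValueError as err:                        # pydantic's ValidationError subclasses ValueError
        print(err)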
@@ -283,8 +301,27 @@ class IngestJobSchema(BaseModelNoExt):
     job_id: Union[str, int]
     tasks: List[IngestTaskSchema]
     tracing_options: Optional[TracingOptionsSchema] = None
+    routing_options: Optional[RoutingOptionsSchema] = None
     pdf_config: Optional[PdfConfigSchema] = None

+    @model_validator(mode="before")
+    @classmethod
+    def migrate_queue_hint(cls, values):
+        """
+        Backward-compatibility shim: if a legacy client sends
+        tracing_options.queue_hint, move it into routing_options.queue_hint.
+        """
+        try:
+            topt = values.get("tracing_options") or {}
+            ropt = values.get("routing_options") or {}
+            if isinstance(topt, dict) and "queue_hint" in topt and "queue_hint" not in ropt:
+                ropt["queue_hint"] = topt.pop("queue_hint")
+                values["routing_options"] = ropt
+                values["tracing_options"] = topt
+        except Exception:
+            pass
+        return values
+

 # ------------------------------------------------------------------------------
 # Utility Functions
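The before-mode validator is a compatibility shim: a queue_hint that an older client nests under tracing_options is moved into routing_options before field validation runs. Restated as a standalone function for illustration (not the library API), the transformation looks like this:

    # Standalone restatement of the shim above, for illustration only.
    def migrate_queue_hint(values: dict) -> dict:
        topt = values.get("tracing_options") or {}
        ropt = values.get("routing_options") or {}
        if isinstance(topt, dict) and "queue_hint" in topt and "queue_hint" not in ropt:
            ropt["queue_hint"] = topt.pop("queue_hint")
            values["routing_options"] = ropt
            values["tracing_options"] = topt
        return values

    print(migrate_queue_hint({"tracing_options": {"queue_hint": "small"}}))
    # {'tracing_options': {}, 'routing_options': {'queue_hint': 'small'}}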
@@ -0,0 +1,283 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-25, NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+import logging
+import time
+import random
+
+
+class _SchedulingStrategy:
+    """
+    Base scheduling strategy interface. Implementations must provide a non-blocking
+    single-sweep attempt over non-immediate queues and return a job or None.
+    """
+
+    def try_once(self, client, queues: Dict[str, str], order: list[str]) -> Optional[dict]:
+        raise NotImplementedError
+
+
+class _LotteryStrategy(_SchedulingStrategy):
+    """
+    Lottery scheduling with fixed weights.
+    Weights: micro=4, small=2, large=1, medium=1, default=1
+    """
+
+    def __init__(self, prioritize_immediate: bool = True) -> None:
+        self._weights: Dict[str, int] = {
+            "micro": 4,
+            "small": 2,
+            "large": 1,
+            "medium": 1,
+            "default": 1,
+        }
+        self._prioritize_immediate: bool = bool(prioritize_immediate)
+
+    def try_once(self, client, queues: Dict[str, str], order: list[str]) -> Optional[dict]:
+        # Immediate-first if enabled (non-blocking)
+        if self._prioritize_immediate:
+            try:
+                job = client.fetch_message(queues["immediate"], 0)
+                if job is not None:
+                    return job
+            except TimeoutError:
+                pass
+        candidates = list(order)
+        weights = [self._weights[q] for q in candidates]
+        while candidates:
+            try:
+                chosen = random.choices(candidates, weights=weights, k=1)[0]
+                job = client.fetch_message(queues[chosen], 0)
+                if job is not None:
+                    return job
+            except TimeoutError:
+                pass
+            finally:
+                idx = candidates.index(chosen)
+                del candidates[idx]
+                del weights[idx]
+        return None
+
+
+class _SimpleStrategy(_SchedulingStrategy):
+    """
+    Simple strategy placeholder. Actual simple-mode handling is done in QosScheduler.fetch_next
+    to directly fetch from the base 'default' queue using the provided timeout.
+    """
+
+    def try_once(self, client, queues: Dict[str, str], order: list[str]) -> Optional[dict]:
+        # Block up to 30s on the base/default queue and return first available job
+        try:
+            return client.fetch_message(queues["default"], 30.0)
+        except TimeoutError:
+            return None
+
+
+class _RoundRobinStrategy(_SchedulingStrategy):
+    """
+    Simple round-robin over non-immediate queues. Maintains rotation across calls.
+    """
+
+    def __init__(self, order: list[str], prioritize_immediate: bool = True) -> None:
+        self._order = list(order)
+        self._len = len(self._order)
+        self._idx = 0
+        self._prioritize_immediate: bool = bool(prioritize_immediate)
+
+    def try_once(self, client, queues: Dict[str, str], order: list[str]) -> Optional[dict]:
+        # Immediate-first if enabled (non-blocking)
+        if self._prioritize_immediate:
+            try:
+                job = client.fetch_message(queues["immediate"], 0)
+                if job is not None:
+                    return job
+            except TimeoutError:
+                pass
+        start_idx = self._idx
+        for step in range(self._len):
+            i = (start_idx + step) % self._len
+            qname = self._order[i]
+            try:
+                job = client.fetch_message(queues[qname], 0)
+                if job is not None:
+                    # advance rotation to the position after the chosen one
+                    self._idx = (i + 1) % self._len
+                    return job
+            except TimeoutError:
+                continue
+        return None
+
+
+class _WeightedRoundRobinStrategy(_SchedulingStrategy):
+    """
+    Smooth Weighted Round Robin (SWRR) using weights micro=4, small=2, large=1, medium=1, default=1.
+    Maintains current weights across calls.
+    """
+
+    def __init__(self, prioritize_immediate: bool = True) -> None:
+        self._weights: Dict[str, int] = {
+            "micro": 4,
+            "small": 2,
+            "large": 1,
+            "medium": 1,
+            "default": 1,
+        }
+        self._current: Dict[str, int] = {k: 0 for k in self._weights.keys()}
+        self._total: int = sum(self._weights.values())
+        self._prioritize_immediate: bool = bool(prioritize_immediate)
+
+    def try_once(self, client, queues: Dict[str, str], order: list[str]) -> Optional[dict]:
+        # Immediate-first if enabled (non-blocking)
+        if self._prioritize_immediate:
+            try:
+                job = client.fetch_message(queues["immediate"], 0)
+                if job is not None:
+                    return job
+            except TimeoutError:
+                pass
+        # Attempt up to len(order) selections per sweep, excluding queues that prove empty
+        active = list(order)
+        for _ in range(len(order)):
+            if not active:
+                break
+            for q in active:
+                self._current[q] += self._weights[q]
+            chosen = max(active, key=lambda q: self._current[q])
+            self._current[chosen] -= self._total
+            try:
+                job = client.fetch_message(queues[chosen], 0)
+                if job is not None:
+                    return job
+            except TimeoutError:
+                job = None
+            # If no job available from chosen, exclude it for the remainder of this sweep
+            if job is None and chosen in active:
+                active.remove(chosen)
+        # Fallback: single non-blocking attempt for each queue in order
+        for q in order:
+            try:
+                job = client.fetch_message(queues[q], 0)
+                if job is not None:
+                    return job
+            except TimeoutError:
+                continue
+        return None
+
+
+class QosScheduler:
+    """
+    Simplified scheduler that fetches jobs from the default queue only.
+    Uses the provided timeout value when polling the broker.
+    """
+
+    def __init__(
+        self,
+        base_queue: str,
+        total_buffer_capacity: int = 1,
+        num_prefetch_threads: int = 0,
+        prefetch_poll_interval: float = 0.0,
+        prefetch_non_immediate: bool = False,
+        strategy: str = "lottery",
+        prioritize_immediate: bool = True,
+    ) -> None:
+        self.base_queue = base_queue
+
+        # Define all derived queues; default behavior still uses only "default"
+        self.queues: Dict[str, str] = {
+            "default": f"{base_queue}",
+            "immediate": f"{base_queue}_immediate",
+            "micro": f"{base_queue}_micro",
+            "small": f"{base_queue}_small",
+            "medium": f"{base_queue}_medium",
+            "large": f"{base_queue}_large",
+        }
+
+        # Priority order for multi-queue fetching; "immediate" always first
+        self._priority_order = [
+            "immediate",
+            "micro",
+            "small",
+            "medium",
+            "large",
+            "default",
+        ]
+
+        # Non-immediate queue order reference
+        self._non_immediate_order = ["micro", "small", "large", "medium", "default"]
+
+        # Logger
+        self._logger = logging.getLogger(__name__)
+
+        # No prefetching - just direct calls
+        self._total_buffer_capacity: int = int(total_buffer_capacity)
+        self._num_prefetch_threads: int = int(num_prefetch_threads)
+        self._prefetch_poll_interval: float = float(prefetch_poll_interval)
+        self._prefetch_non_immediate: bool = bool(prefetch_non_immediate)
+
+        # Strategy selection
+        self._simple_mode: bool = False
+        if strategy == "simple":
+            self._strategy_impl: _SchedulingStrategy = _SimpleStrategy()
+            self._simple_mode = True
+        elif strategy == "round_robin":
+            self._strategy_impl = _RoundRobinStrategy(self._non_immediate_order, prioritize_immediate)
+        elif strategy == "weighted_round_robin":
+            self._strategy_impl = _WeightedRoundRobinStrategy(prioritize_immediate)
+        else:
+            self._strategy_impl = _LotteryStrategy(prioritize_immediate)
+
+    # Context manager helpers for clean shutdown
+    def __enter__(self) -> "QosScheduler":
+        return self
+
+    def __exit__(self, exc_type, exc, tb) -> None:
+        self.close()
+
+    # ---------------------------- Public API ----------------------------
+    def close(self) -> None:
+        """
+        Cleanly close the scheduler. No-op for the current implementation
+        since we do not spin background threads.
+        """
+        return None
+
+    def fetch_next(self, client, timeout: float = 0.0) -> Optional[dict]:
+        """
+        Immediate-first, then strategy-based scheduling among non-immediate queues.
+
+        Behavior:
+        - Always check 'immediate' first (non-blocking). If present, return immediately.
+        - If not, select using the configured strategy (lottery, round_robin, weighted_round_robin).
+        - If no job is found in a full pass:
+            - If timeout <= 0: return None.
+            - Else: sleep in 0.5s increments and retry until accumulated elapsed time >= timeout.
+        """
+        # Simple mode: delegate to the strategy (blocks up to 30s on base queue)
+        if getattr(self, "_simple_mode", False):
+            return self._strategy_impl.try_once(client, self.queues, self._non_immediate_order)
+
+        start = time.monotonic()
+        while True:
+            # Strategy-based attempt (strategy may include immediate priority internally)
+            job = self._strategy_impl.try_once(client, self.queues, self._non_immediate_order)
+            if job is not None:
+                return job
+
+            # No job found in this sweep
+            if timeout <= 0:
+                return None
+
+            elapsed = time.monotonic() - start
+            if elapsed >= timeout:
+                return None

+            # Sleep up to 0.5s, but not beyond remaining timeout
+            remaining = timeout - elapsed
+            sleep_time = 0.5 if remaining > 0.5 else remaining
+            if sleep_time > 0:
+                time.sleep(sleep_time)
+            else:
+                return None
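The new file above (nv_ingest_api/util/message_brokers/qos_scheduler.py, per the RECORD entry added below) only talks to the broker through client.fetch_message(queue_name, timeout), so a small in-memory stub is enough to see the scheduling order. A hedged usage sketch; the stub client and queue contents are invented for illustration:

    from collections import deque

    from nv_ingest_api.util.message_brokers.qos_scheduler import QosScheduler

    class StubClient:
        """Stand-in broker client exposing fetch_message(queue_name, timeout)."""
        def __init__(self, queues):
            self._queues = {name: deque(items) for name, items in queues.items()}
        def fetch_message(self, queue_name, timeout=0):
            q = self._queues.get(queue_name)
            return q.popleft() if q else None  # None signals "nothing available"

    client = StubClient({
        "ingest_immediate": [{"job_id": "i-1"}],
        "ingest_micro": [{"job_id": "m-1"}],
    })
    scheduler = QosScheduler(base_queue="ingest", strategy="weighted_round_robin")
    print(scheduler.fetch_next(client, timeout=0))  # {'job_id': 'i-1'}: immediate queue wins
    print(scheduler.fetch_next(client, timeout=0))  # {'job_id': 'm-1'}: SWRR favors micro next
    print(scheduler.fetch_next(client, timeout=0))  # None: all queues empty and timeout <= 0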
@@ -35,6 +35,7 @@ class SimpleClient(MessageBrokerClientBase):
         connection_timeout: int = 300,
         max_pool_size: int = 128,
         use_ssl: bool = False,
+        api_version: str = "v1",
     ):
         """
         Initialize the SimpleClient with configuration parameters.
@@ -650,6 +650,22 @@ class RedisClient(MessageBrokerClientBase):
         except Exception as e:
             logger.exception(f"{log_prefix}: Cache read error: {e}. Trying Redis.")

+        # If caller requests non-blocking behavior (timeout <= 0), attempt immediate pop.
+        if timeout is not None and timeout <= 0:
+            try:
+                client = self.get_client()
+                popped = client.lpop(channel_name)
+                if popped is None:
+                    return None
+                try:
+                    return json.loads(popped)
+                except json.JSONDecodeError as e:
+                    logger.error(f"Failed to decode JSON from non-blocking LPOP on '{channel_name}': {e}")
+                    return None
+            except Exception as e:
+                logger.warning(f"Non-blocking LPOP failed for '{channel_name}': {e}")
+                return None
+
         while True:
             try:
                 fetch_result: Union[Dict[str, Any], List[Dict[str, Any]]]
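This hunk sits in RedisClient's fetch path and makes a non-positive timeout mean a single non-blocking LPOP instead of a blocking wait, which is what the QoS scheduler's 0-second sweeps rely on. A hedged sketch, assuming the surrounding method is the fetch_message call the scheduler uses; the constructor arguments and queue name are illustrative:

    # Hedged sketch: RedisClient constructor arguments are assumed for illustration.
    client = RedisClient(host="localhost", port=6379)

    job = client.fetch_message("ingest_micro", timeout=0)  # single LPOP, no blocking
    if job is None:
        pass  # the list was empty; control returns immediately to the caller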
@@ -711,6 +727,150 @@ class RedisClient(MessageBrokerClientBase):
                 logger.exception(f"{log_prefix}: Unexpected error during fetch: {e}")
                 raise ValueError(f"Unexpected error during fetch: {e}") from e

+    def fetch_message_from_any(self, channel_names: List[str], timeout: float = 0) -> Optional[Dict[str, Any]]:
+        """
+        Attempt to fetch a message from the first non-empty list among the provided channel names
+        using Redis BLPOP. If the popped item represents a fragmented message, this method will
+        continue popping from the same channel to reconstruct the full message.
+
+        Parameters
+        ----------
+        channel_names : List[str]
+            Ordered list of Redis list keys to attempt in priority order.
+        timeout : float, optional
+            Timeout in seconds to wait for any item across the provided lists. Redis supports
+            integer-second timeouts; sub-second values will be truncated.
+
+        Returns
+        -------
+        dict or None
+            The reconstructed message dictionary if an item was fetched; otherwise None on timeout.
+        """
+        if not channel_names:
+            return None
+
+        client = self.get_client()
+        blpop_timeout = int(max(0, timeout))
+        try:
+            res = client.blpop(channel_names, timeout=blpop_timeout)
+        except (redis.RedisError, ConnectionError) as e:
+            logger.debug(f"BLPOP error on {channel_names}: {e}")
+            return None
+
+        if res is None:
+            return None
+
+        list_key, first_bytes = res
+        if isinstance(list_key, bytes):
+            try:
+                list_key = list_key.decode("utf-8")
+            except Exception:
+                list_key = str(list_key)
+        # Decode first element
+        try:
+            first_msg = json.loads(first_bytes)
+        except json.JSONDecodeError as e:
+            logger.error(f"Failed to decode JSON popped from '{list_key}': {e}")
+            return None
+
+        expected_count: int = int(first_msg.get("fragment_count", 1))
+        if expected_count <= 1:
+            return first_msg
+
+        # Collect remaining fragments from the same list key
+        fragments: List[Dict[str, Any]] = [first_msg]
+        accumulated = 0.0
+        start_time = time.monotonic()
+        for i in range(1, expected_count):
+            remaining = max(0, timeout - accumulated)
+            per_frag_timeout = int(max(1, remaining)) if timeout else 1
+            try:
+                frag_res = client.blpop([list_key], timeout=per_frag_timeout)
+            except (redis.RedisError, ConnectionError) as e:
+                logger.error(f"BLPOP error while collecting fragments from '{list_key}': {e}")
+                return None
+            if frag_res is None:
+                logger.error(f"Timeout while collecting fragment {i}/{expected_count-1} from '{list_key}'")
+                return None
+            _, frag_key_bytes_or_val = frag_res
+            # Redis returns (key, value); we don't need the key here
+            frag_bytes = frag_key_bytes_or_val
+            try:
+                frag_msg = json.loads(frag_bytes)
+                fragments.append(frag_msg)
+            except json.JSONDecodeError as e:
+                logger.error(f"Failed to decode fragment JSON from '{list_key}': {e}")
+                return None
+            accumulated = time.monotonic() - start_time
+
+        # Combine and return
+        try:
+            return self._combine_fragments(fragments)
+        except Exception as e:
+            logger.error(f"Error combining fragments from '{list_key}': {e}")
+            return None
+
+    def fetch_message_from_any_with_key(
+        self, channel_names: List[str], timeout: float = 0
+    ) -> Optional[Tuple[str, Dict[str, Any]]]:
+        """
+        Like fetch_message_from_any(), but returns the Redis list key together with the message.
+        This is useful for higher-level schedulers that need to apply per-category quotas.
+        """
+        if not channel_names:
+            return None
+
+        client = self.get_client()
+        blpop_timeout = int(max(0, timeout))
+        try:
+            res = client.blpop(channel_names, timeout=blpop_timeout)
+        except (redis.RedisError, ConnectionError) as e:
+            logger.debug(f"BLPOP error on {channel_names}: {e}")
+            return None
+
+        if res is None:
+            return None
+
+        list_key, first_bytes = res
+        try:
+            first_msg = json.loads(first_bytes)
+        except json.JSONDecodeError as e:
+            logger.error(f"Failed to decode JSON popped from '{list_key}': {e}")
+            return None
+
+        expected_count: int = int(first_msg.get("fragment_count", 1))
+        if expected_count <= 1:
+            return list_key, first_msg
+
+        fragments: List[Dict[str, Any]] = [first_msg]
+        accumulated = 0.0
+        start_time = time.monotonic()
+        for i in range(1, expected_count):
+            remaining = max(0, timeout - accumulated)
+            per_frag_timeout = int(max(1, remaining)) if timeout else 1
+            try:
+                frag_res = client.blpop([list_key], timeout=per_frag_timeout)
+            except (redis.RedisError, ConnectionError) as e:
+                logger.error(f"BLPOP error while collecting fragments from '{list_key}': {e}")
+                return None
+            if frag_res is None:
+                logger.error(f"Timeout while collecting fragment {i}/{expected_count-1} from '{list_key}'")
+                return None
+            _, frag_bytes = frag_res
+            try:
+                frag_msg = json.loads(frag_bytes)
+                fragments.append(frag_msg)
+            except json.JSONDecodeError as e:
+                logger.error(f"Failed to decode fragment JSON from '{list_key}': {e}")
+                return None
+            accumulated = time.monotonic() - start_time
+
+        try:
+            return list_key, self._combine_fragments(fragments)
+        except Exception as e:
+            logger.error(f"Error combining fragments from '{list_key}': {e}")
+            return None
+
     @staticmethod
     def _combine_fragments(fragments: List[Dict[str, Any]]) -> Dict[str, Any]:
         """
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nv-ingest-api
-Version: 2025.10.30.dev20251030
+Version: 2025.10.31.dev20251031
 Summary: Python module with core document ingestion functions.
 Author-email: Jeremy Dyer <jdyer@nvidia.com>
 License: Apache License
@@ -85,7 +85,7 @@ nv_ingest_api/internal/schemas/message_brokers/request_schema.py,sha256=LZX_wXDx
 nv_ingest_api/internal/schemas/message_brokers/response_schema.py,sha256=4b275HlzBSzpmuE2wdoeaGKPCdKki3wuWldtRIfrj8w,727
 nv_ingest_api/internal/schemas/meta/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
 nv_ingest_api/internal/schemas/meta/base_model_noext.py,sha256=8hXU1uuiqZ6t8EsoZ8vlC5EFf2zSZrKEX133FcfZMwI,316
-nv_ingest_api/internal/schemas/meta/ingest_job_schema.py,sha256=jisoD3r6ZT7mlc3gsNDcnMu4Mo3AlwupHgZhJx382H4,10875
+nv_ingest_api/internal/schemas/meta/ingest_job_schema.py,sha256=auvKHFJm9FquYRS6Ro7GawvgNhszT-1uG3ADMy4E_B8,12240
 nv_ingest_api/internal/schemas/meta/metadata_schema.py,sha256=nHS2PwYE7YwuTUotvUd0hP8a-5f9uefy6_G3mMH4UyQ,12321
 nv_ingest_api/internal/schemas/meta/udf.py,sha256=GgzqbZOlipQgMpDhbXLqbF8xrHenj_hMNqhR_P-1ynw,779
 nv_ingest_api/internal/schemas/mutate/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
@@ -140,10 +140,11 @@ nv_ingest_api/util/logging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
 nv_ingest_api/util/logging/configuration.py,sha256=05KR3LOS-PCqU-Io__iiKG_Ds730eKxciklFfNeId3w,3126
 nv_ingest_api/util/logging/sanitize.py,sha256=-dIbmvLTevrTRd18QKUQQMV4hBk6pStWP_7_VtDDctg,2584
 nv_ingest_api/util/message_brokers/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
+nv_ingest_api/util/message_brokers/qos_scheduler.py,sha256=TdpjRyUfqR9y1v9SNxZaIN9ZgxVMlEGvLFhfUD7jjO8,10339
 nv_ingest_api/util/message_brokers/simple_message_broker/__init__.py,sha256=WaQ3CWIpIKWEivT5kL-bkmzcSQKLGFNFHdXHUJjqZFs,325
 nv_ingest_api/util/message_brokers/simple_message_broker/broker.py,sha256=PekxaxVcAa9k1wgUtozlr04SW3sAeqYJE-wdVBZf9eo,17264
 nv_ingest_api/util/message_brokers/simple_message_broker/ordered_message_queue.py,sha256=3p-LRqG8qLnsfEhBNf73_DG22C08JKahTqUvPLS2Apg,2554
-nv_ingest_api/util/message_brokers/simple_message_broker/simple_client.py,sha256=CCRAbq2EBH2quX9UTfuBbz3tTMDnWqhEF33roFwbyuk,16484
+nv_ingest_api/util/message_brokers/simple_message_broker/simple_client.py,sha256=BFuegFsbU_YB_98gzhs8oU2by4_iVzIDanT0nJdjJ7g,16517
 nv_ingest_api/util/metadata/__init__.py,sha256=HIHfzSig66GT0Uk8qsGBm_f13fKYcPtItBicRUWOOVA,183
 nv_ingest_api/util/metadata/aggregators.py,sha256=YYdvJ1E04eGFZKKHUxXoH6mzLg8nor9Smvnv0qzqK5w,15988
 nv_ingest_api/util/multi_processing/__init__.py,sha256=4fojP8Rp_5Hu1YAkqGylqTyEZ-HBVVEunn5Z9I99swA,242
@@ -157,7 +158,7 @@ nv_ingest_api/util/service_clients/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusX
 nv_ingest_api/util/service_clients/client_base.py,sha256=eCOeq3Rr6Xnnsh-oHszYlQTOffQyzsT8s43V4V8H_h8,2716
 nv_ingest_api/util/service_clients/kafka/__init__.py,sha256=uLsBITo_XfgbwpzqXUm1IYX6XlZrTfx6T1cIhdILwG8,140
 nv_ingest_api/util/service_clients/redis/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
-nv_ingest_api/util/service_clients/redis/redis_client.py,sha256=3NLecvIvVN1v-sA7d7G-_f6qJVZyfJE2H8Iu5KG3Aew,37417
+nv_ingest_api/util/service_clients/redis/redis_client.py,sha256=b7rqJKYW27lmuSjTTho1sO2-q093cfeXARx8JgCHZ-o,44042
 nv_ingest_api/util/service_clients/rest/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nv_ingest_api/util/service_clients/rest/rest_client.py,sha256=7ymPxhuN9SP8nPSVepqqbvUxXPaTVunq2aC2bDbg98g,23684
 nv_ingest_api/util/string_processing/__init__.py,sha256=mkwHthyS-IILcLcL1tJYeF6mpqX3pxEw5aUzDGjTSeU,1411
@@ -165,10 +166,10 @@ nv_ingest_api/util/string_processing/configuration.py,sha256=2HS08msccuPCT0fn_jf
 nv_ingest_api/util/string_processing/yaml.py,sha256=4Zdmc4474lUZn6kznqaNTlQJwsmRnnJQZ-DvAWLu-zo,2678
 nv_ingest_api/util/system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nv_ingest_api/util/system/hardware_info.py,sha256=1UFM8XE6M3pgQcpbVsCsqDQ7Dj-zzptL-XRE-DEu9UA,27213
-nv_ingest_api-2025.10.30.dev20251030.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+nv_ingest_api-2025.10.31.dev20251031.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
 udfs/__init__.py,sha256=pXFqPgXIUqHDfj7SAR1Q19tt8KwGv_iMvhHyziz4AYM,205
 udfs/llm_summarizer_udf.py,sha256=lH5c5NHoT-5ecHC3og_40u1Ujta8SpsKU4X0e4wzbMU,7314
-nv_ingest_api-2025.10.30.dev20251030.dist-info/METADATA,sha256=Gv6plGuAgs8l0Zb7RDZv4eyLhcL-ajOSAKgH8SW3aRI,14106
-nv_ingest_api-2025.10.30.dev20251030.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-nv_ingest_api-2025.10.30.dev20251030.dist-info/top_level.txt,sha256=I1lseG9FF0CH93SPx4kFblsxFuv190cfzaas_CLNIiw,19
-nv_ingest_api-2025.10.30.dev20251030.dist-info/RECORD,,
+nv_ingest_api-2025.10.31.dev20251031.dist-info/METADATA,sha256=U7us7h5rNX4pHGeu8TCWRO3FhJYOKQvLotHb-ivnAaQ,14106
+nv_ingest_api-2025.10.31.dev20251031.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nv_ingest_api-2025.10.31.dev20251031.dist-info/top_level.txt,sha256=I1lseG9FF0CH93SPx4kFblsxFuv190cfzaas_CLNIiw,19
+nv_ingest_api-2025.10.31.dev20251031.dist-info/RECORD,,