nv-ingest-api 2025.10.4.dev20251004__py3-none-any.whl → 2025.11.2.dev20251102__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their respective public registries, and is provided for informational purposes only.

This release of nv-ingest-api has been flagged as potentially problematic.

Files changed (34)
  1. nv_ingest_api/internal/extract/image/chart_extractor.py +7 -3
  2. nv_ingest_api/internal/extract/image/infographic_extractor.py +7 -3
  3. nv_ingest_api/internal/extract/image/table_extractor.py +7 -3
  4. nv_ingest_api/internal/extract/pdf/engines/pdfium.py +6 -4
  5. nv_ingest_api/internal/primitives/nim/model_interface/ocr.py +11 -4
  6. nv_ingest_api/internal/primitives/nim/model_interface/parakeet.py +4 -0
  7. nv_ingest_api/internal/primitives/nim/nim_client.py +158 -15
  8. nv_ingest_api/internal/schemas/extract/extract_audio_schema.py +4 -2
  9. nv_ingest_api/internal/schemas/extract/extract_chart_schema.py +10 -1
  10. nv_ingest_api/internal/schemas/extract/extract_docx_schema.py +4 -2
  11. nv_ingest_api/internal/schemas/extract/extract_image_schema.py +4 -2
  12. nv_ingest_api/internal/schemas/extract/extract_infographic_schema.py +10 -1
  13. nv_ingest_api/internal/schemas/extract/extract_pdf_schema.py +6 -4
  14. nv_ingest_api/internal/schemas/extract/extract_pptx_schema.py +4 -2
  15. nv_ingest_api/internal/schemas/extract/extract_table_schema.py +9 -1
  16. nv_ingest_api/internal/schemas/meta/ingest_job_schema.py +56 -1
  17. nv_ingest_api/internal/schemas/meta/metadata_schema.py +9 -0
  18. nv_ingest_api/internal/schemas/mixins.py +39 -0
  19. nv_ingest_api/internal/schemas/transform/transform_text_embedding_schema.py +4 -0
  20. nv_ingest_api/internal/transform/embed_text.py +82 -0
  21. nv_ingest_api/util/dataloader/dataloader.py +20 -9
  22. nv_ingest_api/util/image_processing/transforms.py +67 -1
  23. nv_ingest_api/util/message_brokers/qos_scheduler.py +283 -0
  24. nv_ingest_api/util/message_brokers/simple_message_broker/simple_client.py +1 -0
  25. nv_ingest_api/util/multi_processing/mp_pool_singleton.py +8 -2
  26. nv_ingest_api/util/service_clients/redis/redis_client.py +160 -0
  27. nv_ingest_api/util/service_clients/rest/rest_client.py +42 -3
  28. nv_ingest_api/util/string_processing/yaml.py +41 -4
  29. {nv_ingest_api-2025.10.4.dev20251004.dist-info → nv_ingest_api-2025.11.2.dev20251102.dist-info}/METADATA +2 -1
  30. {nv_ingest_api-2025.10.4.dev20251004.dist-info → nv_ingest_api-2025.11.2.dev20251102.dist-info}/RECORD +34 -32
  31. udfs/llm_summarizer_udf.py +132 -137
  32. {nv_ingest_api-2025.10.4.dev20251004.dist-info → nv_ingest_api-2025.11.2.dev20251102.dist-info}/WHEEL +0 -0
  33. {nv_ingest_api-2025.10.4.dev20251004.dist-info → nv_ingest_api-2025.11.2.dev20251102.dist-info}/licenses/LICENSE +0 -0
  34. {nv_ingest_api-2025.10.4.dev20251004.dist-info → nv_ingest_api-2025.11.2.dev20251102.dist-info}/top_level.txt +0 -0
nv_ingest_api/util/message_brokers/qos_scheduler.py
@@ -0,0 +1,283 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2024-25, NVIDIA CORPORATION & AFFILIATES.
+ # All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+
+ from __future__ import annotations
+
+ from typing import Dict, Optional
+ import logging
+ import time
+ import random
+
+
+ class _SchedulingStrategy:
+     """
+     Base scheduling strategy interface. Implementations must provide a non-blocking
+     single-sweep attempt over non-immediate queues and return a job or None.
+     """
+
+     def try_once(self, client, queues: Dict[str, str], order: list[str]) -> Optional[dict]:
+         raise NotImplementedError
+
+
+ class _LotteryStrategy(_SchedulingStrategy):
+     """
+     Lottery scheduling with fixed weights.
+     Weights: micro=4, small=2, large=1, medium=1, default=1
+     """
+
+     def __init__(self, prioritize_immediate: bool = True) -> None:
+         self._weights: Dict[str, int] = {
+             "micro": 4,
+             "small": 2,
+             "large": 1,
+             "medium": 1,
+             "default": 1,
+         }
+         self._prioritize_immediate: bool = bool(prioritize_immediate)
+
+     def try_once(self, client, queues: Dict[str, str], order: list[str]) -> Optional[dict]:
+         # Immediate-first if enabled (non-blocking)
+         if self._prioritize_immediate:
+             try:
+                 job = client.fetch_message(queues["immediate"], 0)
+                 if job is not None:
+                     return job
+             except TimeoutError:
+                 pass
+         candidates = list(order)
+         weights = [self._weights[q] for q in candidates]
+         while candidates:
+             try:
+                 chosen = random.choices(candidates, weights=weights, k=1)[0]
+                 job = client.fetch_message(queues[chosen], 0)
+                 if job is not None:
+                     return job
+             except TimeoutError:
+                 pass
+             finally:
+                 idx = candidates.index(chosen)
+                 del candidates[idx]
+                 del weights[idx]
+         return None
+
+
+ class _SimpleStrategy(_SchedulingStrategy):
+     """
+     Simple strategy placeholder. Actual simple-mode handling is done in QosScheduler.fetch_next
+     to directly fetch from the base 'default' queue using the provided timeout.
+     """
+
+     def try_once(self, client, queues: Dict[str, str], order: list[str]) -> Optional[dict]:
+         # Block up to 30s on the base/default queue and return first available job
+         try:
+             return client.fetch_message(queues["default"], 30.0)
+         except TimeoutError:
+             return None
+
+
+ class _RoundRobinStrategy(_SchedulingStrategy):
+     """
+     Simple round-robin over non-immediate queues. Maintains rotation across calls.
+     """
+
+     def __init__(self, order: list[str], prioritize_immediate: bool = True) -> None:
+         self._order = list(order)
+         self._len = len(self._order)
+         self._idx = 0
+         self._prioritize_immediate: bool = bool(prioritize_immediate)
+
+     def try_once(self, client, queues: Dict[str, str], order: list[str]) -> Optional[dict]:
+         # Immediate-first if enabled (non-blocking)
+         if self._prioritize_immediate:
+             try:
+                 job = client.fetch_message(queues["immediate"], 0)
+                 if job is not None:
+                     return job
+             except TimeoutError:
+                 pass
+         start_idx = self._idx
+         for step in range(self._len):
+             i = (start_idx + step) % self._len
+             qname = self._order[i]
+             try:
+                 job = client.fetch_message(queues[qname], 0)
+                 if job is not None:
+                     # advance rotation to the position after the chosen one
+                     self._idx = (i + 1) % self._len
+                     return job
+             except TimeoutError:
+                 continue
+         return None
+
+
+ class _WeightedRoundRobinStrategy(_SchedulingStrategy):
+     """
+     Smooth Weighted Round Robin (SWRR) using weights micro=4, small=2, large=1, medium=1, default=1.
+     Maintains current weights across calls.
+     """
+
+     def __init__(self, prioritize_immediate: bool = True) -> None:
+         self._weights: Dict[str, int] = {
+             "micro": 4,
+             "small": 2,
+             "large": 1,
+             "medium": 1,
+             "default": 1,
+         }
+         self._current: Dict[str, int] = {k: 0 for k in self._weights.keys()}
+         self._total: int = sum(self._weights.values())
+         self._prioritize_immediate: bool = bool(prioritize_immediate)
+
+     def try_once(self, client, queues: Dict[str, str], order: list[str]) -> Optional[dict]:
+         # Immediate-first if enabled (non-blocking)
+         if self._prioritize_immediate:
+             try:
+                 job = client.fetch_message(queues["immediate"], 0)
+                 if job is not None:
+                     return job
+             except TimeoutError:
+                 pass
+         # Attempt up to len(order) selections per sweep, excluding queues that prove empty
+         active = list(order)
+         for _ in range(len(order)):
+             if not active:
+                 break
+             for q in active:
+                 self._current[q] += self._weights[q]
+             chosen = max(active, key=lambda q: self._current[q])
+             self._current[chosen] -= self._total
+             try:
+                 job = client.fetch_message(queues[chosen], 0)
+                 if job is not None:
+                     return job
+             except TimeoutError:
+                 job = None
+             # If no job available from chosen, exclude it for the remainder of this sweep
+             if job is None and chosen in active:
+                 active.remove(chosen)
+         # Fallback: single non-blocking attempt for each queue in order
+         for q in order:
+             try:
+                 job = client.fetch_message(queues[q], 0)
+                 if job is not None:
+                     return job
+             except TimeoutError:
+                 continue
+         return None
+
+
+ class QosScheduler:
+     """
+     Simplified scheduler that fetches jobs from the default queue only.
+     Uses the provided timeout value when polling the broker.
+     """
+
+     def __init__(
+         self,
+         base_queue: str,
+         total_buffer_capacity: int = 1,
+         num_prefetch_threads: int = 0,
+         prefetch_poll_interval: float = 0.0,
+         prefetch_non_immediate: bool = False,
+         strategy: str = "lottery",
+         prioritize_immediate: bool = True,
+     ) -> None:
+         self.base_queue = base_queue
+
+         # Define all derived queues; default behavior still uses only "default"
+         self.queues: Dict[str, str] = {
+             "default": f"{base_queue}",
+             "immediate": f"{base_queue}_immediate",
+             "micro": f"{base_queue}_micro",
+             "small": f"{base_queue}_small",
+             "medium": f"{base_queue}_medium",
+             "large": f"{base_queue}_large",
+         }
+
+         # Priority order for multi-queue fetching; "immediate" always first
+         self._priority_order = [
+             "immediate",
+             "micro",
+             "small",
+             "medium",
+             "large",
+             "default",
+         ]
+
+         # Non-immediate queue order reference
+         self._non_immediate_order = ["micro", "small", "large", "medium", "default"]
+
+         # Logger
+         self._logger = logging.getLogger(__name__)
+
+         # No prefetching - just direct calls
+         self._total_buffer_capacity: int = int(total_buffer_capacity)
+         self._num_prefetch_threads: int = int(num_prefetch_threads)
+         self._prefetch_poll_interval: float = float(prefetch_poll_interval)
+         self._prefetch_non_immediate: bool = bool(prefetch_non_immediate)
+
+         # Strategy selection
+         self._simple_mode: bool = False
+         if strategy == "simple":
+             self._strategy_impl: _SchedulingStrategy = _SimpleStrategy()
+             self._simple_mode = True
+         elif strategy == "round_robin":
+             self._strategy_impl = _RoundRobinStrategy(self._non_immediate_order, prioritize_immediate)
+         elif strategy == "weighted_round_robin":
+             self._strategy_impl = _WeightedRoundRobinStrategy(prioritize_immediate)
+         else:
+             self._strategy_impl = _LotteryStrategy(prioritize_immediate)
+
+     # Context manager helpers for clean shutdown
+     def __enter__(self) -> "QosScheduler":
+         return self
+
+     def __exit__(self, exc_type, exc, tb) -> None:
+         self.close()
+
+     # ---------------------------- Public API ----------------------------
+     def close(self) -> None:
+         """
+         Cleanly close the scheduler. No-op for the current implementation
+         since we do not spin background threads.
+         """
+         return None
+
+     def fetch_next(self, client, timeout: float = 0.0) -> Optional[dict]:
+         """
+         Immediate-first, then strategy-based scheduling among non-immediate queues.
+
+         Behavior:
+         - Always check 'immediate' first (non-blocking). If present, return immediately.
+         - If not, select using the configured strategy (lottery, round_robin, weighted_round_robin).
+         - If no job is found in a full pass:
+           - If timeout <= 0: return None.
+           - Else: sleep in 0.5s increments and retry until accumulated elapsed time >= timeout.
+         """
+         # Simple mode: delegate to the strategy (blocks up to 30s on base queue)
+         if getattr(self, "_simple_mode", False):
+             return self._strategy_impl.try_once(client, self.queues, self._non_immediate_order)
+
+         start = time.monotonic()
+         while True:
+             # Strategy-based attempt (strategy may include immediate priority internally)
+             job = self._strategy_impl.try_once(client, self.queues, self._non_immediate_order)
+             if job is not None:
+                 return job
+
+             # No job found in this sweep
+             if timeout <= 0:
+                 return None
+
+             elapsed = time.monotonic() - start
+             if elapsed >= timeout:
+                 return None
+
+             # Sleep up to 0.5s, but not beyond remaining timeout
+             remaining = timeout - elapsed
+             sleep_time = 0.5 if remaining > 0.5 else remaining
+             if sleep_time > 0:
+                 time.sleep(sleep_time)
+             else:
+                 return None
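
Both weighted strategies above share the fixed weights micro=4, small=2, large=1, medium=1, default=1 (total 9). As an illustration of the smooth weighted round robin update used by _WeightedRoundRobinStrategy, here is a minimal standalone sketch, not package code; the queue names simply mirror the diff:

# Standalone SWRR sketch: every round each queue gains its weight, the
# highest counter wins, and the winner pays back the total so the others
# catch up. Dict insertion order breaks ties, as with Python's max().
weights = {"micro": 4, "small": 2, "large": 1, "medium": 1, "default": 1}
current = {q: 0 for q in weights}
total = sum(weights.values())  # 9

def pick() -> str:
    for q in current:
        current[q] += weights[q]
    chosen = max(current, key=current.get)
    current[chosen] -= total
    return chosen

print([pick() for _ in range(9)])
# -> ['micro', 'small', 'micro', 'large', 'medium', 'micro', 'default', 'small', 'micro']

After nine picks every counter is back at zero: each queue was served exactly in proportion to its weight, but spread out over the cycle rather than in bursts.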
nv_ingest_api/util/message_brokers/simple_message_broker/simple_client.py
@@ -35,6 +35,7 @@ class SimpleClient(MessageBrokerClientBase):
          connection_timeout: int = 300,
          max_pool_size: int = 128,
          use_ssl: bool = False,
+         api_version: str = "v1",
      ):
          """
          Initialize the SimpleClient with configuration parameters.
nv_ingest_api/util/multi_processing/mp_pool_singleton.py
@@ -5,8 +5,9 @@
 
  import logging
  import math
- import multiprocessing as mp
  import os
+ import sys
+ import multiprocessing as mp
  from threading import Lock
  from typing import Any, Callable, Optional
 
@@ -103,7 +104,12 @@ class ProcessWorkerPoolSingleton:
          The total number of worker processes to start.
          """
          self._total_workers = total_max_workers
-         self._context: mp.context.ForkContext = mp.get_context("fork")
+
+         start_method = "fork"
+         if sys.platform.lower() == "darwin":
+             start_method = "spawn"
+         self._context: mp.context.ForkContext = mp.get_context(start_method)
+
          # Bounded task queue: maximum tasks queued = 2 * total_max_workers.
          self._task_queue: mp.Queue = self._context.Queue(maxsize=2 * total_max_workers)
          self._next_task_id: int = 0
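
The second hunk swaps the hard-coded "fork" start method for a platform check. A minimal standalone sketch of the same choice (illustrative only; it assumes a POSIX host, since "fork" does not exist on Windows):

import multiprocessing as mp
import sys

def worker(q):
    # With "spawn", the target must be a picklable module-level callable.
    q.put("hello from child")

if __name__ == "__main__":
    # macOS is not fork-safe for many workloads, so fall back to "spawn" there.
    start_method = "spawn" if sys.platform.lower() == "darwin" else "fork"
    ctx = mp.get_context(start_method)
    q = ctx.Queue()
    p = ctx.Process(target=worker, args=(q,))
    p.start()
    print(q.get())  # -> "hello from child"
    p.join()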
nv_ingest_api/util/service_clients/redis/redis_client.py
@@ -650,6 +650,22 @@ class RedisClient(MessageBrokerClientBase):
          except Exception as e:
              logger.exception(f"{log_prefix}: Cache read error: {e}. Trying Redis.")
 
+         # If caller requests non-blocking behavior (timeout <= 0), attempt immediate pop.
+         if timeout is not None and timeout <= 0:
+             try:
+                 client = self.get_client()
+                 popped = client.lpop(channel_name)
+                 if popped is None:
+                     return None
+                 try:
+                     return json.loads(popped)
+                 except json.JSONDecodeError as e:
+                     logger.error(f"Failed to decode JSON from non-blocking LPOP on '{channel_name}': {e}")
+                     return None
+             except Exception as e:
+                 logger.warning(f"Non-blocking LPOP failed for '{channel_name}': {e}")
+                 return None
+
          while True:
              try:
                  fetch_result: Union[Dict[str, Any], List[Dict[str, Any]]]
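
The new early-return path gives callers a true non-blocking fetch: a timeout of zero or less maps to a single LPOP instead of the blocking loop. A sketch of the same pattern against redis-py directly (the queue name and payload are illustrative):

import json
import redis

r = redis.Redis(host="localhost", port=6379)

def try_pop(queue: str):
    """Return one decoded message, or None immediately if the list is empty."""
    raw = r.lpop(queue)  # LPOP never blocks; None signals an empty list
    if raw is None:
        return None
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return None

r.rpush("ingest_queue", json.dumps({"job_id": 1}))
print(try_pop("ingest_queue"))  # -> {'job_id': 1}
print(try_pop("ingest_queue"))  # -> None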
@@ -711,6 +727,150 @@ class RedisClient(MessageBrokerClientBase):
              logger.exception(f"{log_prefix}: Unexpected error during fetch: {e}")
              raise ValueError(f"Unexpected error during fetch: {e}") from e
 
+     def fetch_message_from_any(self, channel_names: List[str], timeout: float = 0) -> Optional[Dict[str, Any]]:
+         """
+         Attempt to fetch a message from the first non-empty list among the provided channel names
+         using Redis BLPOP. If the popped item represents a fragmented message, this method will
+         continue popping from the same channel to reconstruct the full message.
+
+         Parameters
+         ----------
+         channel_names : List[str]
+             Ordered list of Redis list keys to attempt in priority order.
+         timeout : float, optional
+             Timeout in seconds to wait for any item across the provided lists. Redis supports
+             integer-second timeouts; sub-second values will be truncated.
+
+         Returns
+         -------
+         dict or None
+             The reconstructed message dictionary if an item was fetched; otherwise None on timeout.
+         """
+         if not channel_names:
+             return None
+
+         client = self.get_client()
+         blpop_timeout = int(max(0, timeout))
+         try:
+             res = client.blpop(channel_names, timeout=blpop_timeout)
+         except (redis.RedisError, ConnectionError) as e:
+             logger.debug(f"BLPOP error on {channel_names}: {e}")
+             return None
+
+         if res is None:
+             return None
+
+         list_key, first_bytes = res
+         if isinstance(list_key, bytes):
+             try:
+                 list_key = list_key.decode("utf-8")
+             except Exception:
+                 list_key = str(list_key)
+         # Decode first element
+         try:
+             first_msg = json.loads(first_bytes)
+         except json.JSONDecodeError as e:
+             logger.error(f"Failed to decode JSON popped from '{list_key}': {e}")
+             return None
+
+         expected_count: int = int(first_msg.get("fragment_count", 1))
+         if expected_count <= 1:
+             return first_msg
+
+         # Collect remaining fragments from the same list key
+         fragments: List[Dict[str, Any]] = [first_msg]
+         accumulated = 0.0
+         start_time = time.monotonic()
+         for i in range(1, expected_count):
+             remaining = max(0, timeout - accumulated)
+             per_frag_timeout = int(max(1, remaining)) if timeout else 1
+             try:
+                 frag_res = client.blpop([list_key], timeout=per_frag_timeout)
+             except (redis.RedisError, ConnectionError) as e:
+                 logger.error(f"BLPOP error while collecting fragments from '{list_key}': {e}")
+                 return None
+             if frag_res is None:
+                 logger.error(f"Timeout while collecting fragment {i}/{expected_count-1} from '{list_key}'")
+                 return None
+             _, frag_key_bytes_or_val = frag_res
+             # Redis returns (key, value); we don't need the key here
+             frag_bytes = frag_key_bytes_or_val
+             try:
+                 frag_msg = json.loads(frag_bytes)
+                 fragments.append(frag_msg)
+             except json.JSONDecodeError as e:
+                 logger.error(f"Failed to decode fragment JSON from '{list_key}': {e}")
+                 return None
+             accumulated = time.monotonic() - start_time
+
+         # Combine and return
+         try:
+             return self._combine_fragments(fragments)
+         except Exception as e:
+             logger.error(f"Error combining fragments from '{list_key}': {e}")
+             return None
+
+     def fetch_message_from_any_with_key(
+         self, channel_names: List[str], timeout: float = 0
+     ) -> Optional[Tuple[str, Dict[str, Any]]]:
+         """
+         Like fetch_message_from_any(), but returns the Redis list key together with the message.
+         This is useful for higher-level schedulers that need to apply per-category quotas.
+         """
+         if not channel_names:
+             return None
+
+         client = self.get_client()
+         blpop_timeout = int(max(0, timeout))
+         try:
+             res = client.blpop(channel_names, timeout=blpop_timeout)
+         except (redis.RedisError, ConnectionError) as e:
+             logger.debug(f"BLPOP error on {channel_names}: {e}")
+             return None
+
+         if res is None:
+             return None
+
+         list_key, first_bytes = res
+         try:
+             first_msg = json.loads(first_bytes)
+         except json.JSONDecodeError as e:
+             logger.error(f"Failed to decode JSON popped from '{list_key}': {e}")
+             return None
+
+         expected_count: int = int(first_msg.get("fragment_count", 1))
+         if expected_count <= 1:
+             return list_key, first_msg
+
+         fragments: List[Dict[str, Any]] = [first_msg]
+         accumulated = 0.0
+         start_time = time.monotonic()
+         for i in range(1, expected_count):
+             remaining = max(0, timeout - accumulated)
+             per_frag_timeout = int(max(1, remaining)) if timeout else 1
+             try:
+                 frag_res = client.blpop([list_key], timeout=per_frag_timeout)
+             except (redis.RedisError, ConnectionError) as e:
+                 logger.error(f"BLPOP error while collecting fragments from '{list_key}': {e}")
+                 return None
+             if frag_res is None:
+                 logger.error(f"Timeout while collecting fragment {i}/{expected_count-1} from '{list_key}'")
+                 return None
+             _, frag_bytes = frag_res
+             try:
+                 frag_msg = json.loads(frag_bytes)
+                 fragments.append(frag_msg)
+             except json.JSONDecodeError as e:
+                 logger.error(f"Failed to decode fragment JSON from '{list_key}': {e}")
+                 return None
+             accumulated = time.monotonic() - start_time
+
+         try:
+             return list_key, self._combine_fragments(fragments)
+         except Exception as e:
+             logger.error(f"Error combining fragments from '{list_key}': {e}")
+             return None
+
      @staticmethod
      def _combine_fragments(fragments: List[Dict[str, Any]]) -> Dict[str, Any]:
          """
nv_ingest_api/util/service_clients/rest/rest_client.py
@@ -103,6 +103,17 @@ class RestClient(MessageBrokerClientBase):
          Default timeout in seconds for waiting for data after connection. Default is None.
      http_allocator : Optional[Callable[[], Any]], optional
          A callable that returns an HTTP client instance. If None, `requests.Session()` is used.
+     **kwargs : dict
+         Additional keyword arguments. Supported keys:
+         - api_version : str, optional
+             API version to use ('v1' or 'v2'). Defaults to 'v1' if not specified.
+             Invalid versions will log a warning and fall back to 'v1'.
+         - base_url : str, optional
+             Override the generated base URL.
+         - headers : dict, optional
+             Additional headers to include in requests.
+         - auth : optional
+             Authentication configuration for requests.
 
      Returns
      -------
@@ -137,13 +148,30 @@ class RestClient(MessageBrokerClientBase):
          )
          self._client = requests.Session()
 
-         self._submit_endpoint: str = "/v1/submit_job"
-         self._fetch_endpoint: str = "/v1/fetch_job"
+         # Validate and normalize API version to prevent misconfiguration
+         # Default to v1 for backwards compatibility if not explicitly provided
+         VALID_API_VERSIONS = {"v1", "v2"}
+         raw_api_version = kwargs.get("api_version", "v1")
+         api_version = str(raw_api_version).strip().lower()
+
+         if api_version not in VALID_API_VERSIONS:
+             logger.warning(
+                 f"Invalid API version '{raw_api_version}' specified. "
+                 f"Valid versions are: {VALID_API_VERSIONS}. Falling back to 'v1'."
+             )
+             api_version = "v1"
+
+         self._api_version = api_version
+         self._submit_endpoint: str = f"/{api_version}/submit_job"
+         self._fetch_endpoint: str = f"/{api_version}/fetch_job"
          self._base_url: str = kwargs.get("base_url") or self._generate_url(self._host, self._port)
          self._headers = kwargs.get("headers", {})
          self._auth = kwargs.get("auth", None)
 
          logger.debug(f"RestClient base URL set to: {self._base_url}")
+         logger.info(
+             f"RestClient using API version: {api_version} (endpoints: {self._submit_endpoint}, {self._fetch_endpoint})"
+         )
 
      @staticmethod
      def _generate_url(host: str, port: int) -> str:
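
The normalization above is easy to exercise in isolation; here is the same logic lifted into a standalone function (the function name is mine, the client inlines this in __init__):

import logging

logger = logging.getLogger(__name__)
VALID_API_VERSIONS = {"v1", "v2"}

def normalize_api_version(raw) -> str:
    version = str(raw).strip().lower()
    if version not in VALID_API_VERSIONS:
        logger.warning("Invalid API version %r; falling back to 'v1'.", raw)
        return "v1"
    return version

assert normalize_api_version(" V2 ") == "v2"   # trimmed and lowercased
assert normalize_api_version("v3") == "v1"     # warns and falls back
print(f"/{normalize_api_version('v2')}/submit_job")  # -> /v2/submit_job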
@@ -308,7 +336,18 @@
 
          retries: int = 0
          url: str = f"{self._base_url}{self._fetch_endpoint}/{job_id}"
-         req_timeout: Tuple[float, Optional[float]] = self._timeout
+         # Derive per-call timeout if provided; otherwise use default
+         if timeout is None:
+             req_timeout: Tuple[float, Optional[float]] = self._timeout
+         else:
+             if isinstance(timeout, tuple):
+                 # Expect (connect, read)
+                 connect_t = float(timeout[0])
+                 read_t = None if (len(timeout) < 2 or timeout[1] is None) else float(timeout[1])
+                 req_timeout = (connect_t, read_t)
+             else:
+                 # Single float means override read timeout, keep a small connect timeout
+                 req_timeout = (min(self._default_connect_timeout, 5.0), float(timeout))
 
          while True:
              result: Optional[Any] = None
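
requests accepts either a single number or a (connect, read) tuple for its timeout argument, and the branch above maps a bare float onto the read timeout only. The same mapping as a pure function (the names and the 5-second connect default are mine, for illustration):

from typing import Optional, Tuple

DEFAULT_TIMEOUT: Tuple[float, Optional[float]] = (5.0, None)
DEFAULT_CONNECT_TIMEOUT = 5.0

def normalize_timeout(timeout) -> Tuple[float, Optional[float]]:
    if timeout is None:
        return DEFAULT_TIMEOUT  # fall back to the client-wide default
    if isinstance(timeout, tuple):
        connect_t = float(timeout[0])  # (connect, read)
        read_t = None if len(timeout) < 2 or timeout[1] is None else float(timeout[1])
        return (connect_t, read_t)
    # A bare number overrides only the read timeout.
    return (min(DEFAULT_CONNECT_TIMEOUT, 5.0), float(timeout))

assert normalize_timeout(None) == (5.0, None)
assert normalize_timeout(30) == (5.0, 30.0)
assert normalize_timeout((2, 60)) == (2.0, 60.0)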
nv_ingest_api/util/string_processing/yaml.py
@@ -1,5 +1,6 @@
  import os
  import re
+ from typing import Optional
 
  # This regex finds all forms of environment variables:
  # $VAR, ${VAR}, $VAR|default, and ${VAR|default}
@@ -20,12 +21,46 @@ def _replacer(match: re.Match) -> str:
      var_name = match.group("braced") or match.group("named")
      default_val = match.group("braced_default") or match.group("named_default")
 
-     # Get value from environment, or use default.
-     value = os.environ.get(var_name, default_val)
+     # First try the primary env var
+     value = os.environ.get(var_name)
+     if value is not None:
+         return value
 
-     if value is None:
+     # If primary is missing, try the default.
+     resolved_default = _resolve_default_with_single_fallback(default_val)
+
+     if resolved_default is None:
          return ""
-     return value
+
+     return resolved_default
+
+
+ def _is_var_ref(token: str) -> Optional[str]:
+     """If token is a $VAR or ${VAR} reference, return VAR name; else None."""
+     if not token:
+         return None
+     if token.startswith("${") and token.endswith("}"):
+         inner = token[2:-1]
+         return inner if re.fullmatch(r"\w+", inner) else None
+     if token.startswith("$"):
+         inner = token[1:]
+         return inner if re.fullmatch(r"\w+", inner) else None
+     return None
+
+
+ def _resolve_default_with_single_fallback(default_val: Optional[str]) -> Optional[str]:
+     """
+     Support a single-level fallback where the default itself can be another env var.
+     For example, in $A|$B or ${A|$B}, we try B if A missing.
+     """
+     if default_val is None:
+         return None
+
+     var = _is_var_ref(default_val)
+     if var is not None:
+         return os.environ.get(var, None)
+
+     return default_val
 
 
  def substitute_env_vars_in_yaml_content(raw_content: str) -> str:
@@ -35,6 +70,8 @@ def substitute_env_vars_in_yaml_content(raw_content: str) -> str:
      This function finds all occurrences of environment variable placeholders
      ($VAR, ${VAR}, $VAR|default, ${VAR|default}) in the input string
      and replaces them with their corresponding environment variable values.
+     Also supports a single fallback to another env var: $VAR|$OTHER, ${VAR|$OTHER}
+     Quoted defaults are preserved EXACTLY as written (e.g., 'a,b' keeps quotes).
 
      Args:
          raw_content: The raw string content of a YAML file.
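
A short usage sketch of the new single-level fallback, calling the module's public helper (the environment variable names are illustrative):

import os
from nv_ingest_api.util.string_processing.yaml import substitute_env_vars_in_yaml_content

os.environ.pop("PRIMARY_HOST", None)   # primary unset
os.environ.pop("REDIS_PORT", None)
os.environ["BACKUP_HOST"] = "redis-backup"

raw = "host: ${PRIMARY_HOST|$BACKUP_HOST}\nport: ${REDIS_PORT|6379}\n"
print(substitute_env_vars_in_yaml_content(raw))
# host: redis-backup   <- fell back to the $BACKUP_HOST reference
# port: 6379           <- literal default used as-is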
{nv_ingest_api-2025.10.4.dev20251004.dist-info → nv_ingest_api-2025.11.2.dev20251102.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nv-ingest-api
- Version: 2025.10.4.dev20251004
+ Version: 2025.11.2.dev20251102
  Summary: Python module with core document ingestion functions.
  Author-email: Jeremy Dyer <jdyer@nvidia.com>
  License: Apache License
@@ -222,6 +222,7 @@
  Requires-Dist: fsspec>=2025.5.1
  Requires-Dist: universal_pathlib>=0.2.6
  Requires-Dist: ffmpeg-python==0.2.0
  Requires-Dist: tritonclient
+ Requires-Dist: glom
  Dynamic: license-file
 
  # nv-ingest-api