nv-ingest 2025.12.10.dev20251210__py3-none-any.whl → 2025.12.18.dev20251218__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nv_ingest/api/main.py CHANGED
@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)
 app = FastAPI(
     title="NV-Ingest Microservice",
     description="Service for ingesting heterogenous datatypes",
-    version="25.4.2",
+    version="26.1.0",
     contact={
         "name": "NVIDIA Corporation",
         "url": "https://nvidia.com",
@@ -162,6 +162,11 @@ def build_logging_config_from_env() -> LoggingConfig:
         if key not in os.environ:
             os.environ[key] = default_value
 
+    # For PRODUCTION mode, also suppress nv-ingest module INFO logs
+    if preset_level == "PRODUCTION":
+        logging.getLogger("nv_ingest").setLevel(logging.WARNING)
+        logging.getLogger("nv_ingest_api").setLevel(logging.WARNING)
+
     logger.info(f"Applied Ray logging preset: {preset_level}")
 
     # Get log level from environment, default to INFO
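Together with the quiet-mode changes further down, this means the PRODUCTION preset now silences the nv_ingest and nv_ingest_api loggers in addition to Ray's own logging. A minimal sketch of opting into the preset, assuming the INGEST_RAY_LOG_LEVEL variable (named in the changes below) is set before build_logging_config_from_env() runs:

import logging
import os

# Opt into the PRODUCTION preset before the pipeline builds its logging config;
# build_logging_config_from_env() will then drop nv-ingest module logs to WARNING.
os.environ.setdefault("INGEST_RAY_LOG_LEVEL", "PRODUCTION")

# Equivalent manual suppression, if only these module loggers should be quieted:
logging.getLogger("nv_ingest").setLevel(logging.WARNING)
logging.getLogger("nv_ingest_api").setLevel(logging.WARNING)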
@@ -324,6 +329,7 @@ def launch_pipeline(
     pipeline_config = resolve_static_replicas(pipeline_config)
 
     # Pretty print the final pipeline configuration (after replica resolution)
+    # INFO level so it shows in docker/helm deployments; quiet mode suppresses in library mode
     pretty_output = pretty_print_pipeline_config(pipeline_config, config_path=None)
     logger.info("\n" + pretty_output)
 
@@ -150,7 +150,7 @@ if __name__ == "__main__":
     os.environ["OCR_GRPC_ENDPOINT"] = "localhost:8010"
     os.environ["OCR_INFER_PROTOCOL"] = "grpc"
     os.environ["OCR_MODEL_NAME"] = "paddle"
-    os.environ["NEMORETRIEVER_PARSE_HTTP_ENDPOINT"] = "https://integrate.api.nvidia.com/v1/chat/completions"
+    os.environ["NEMOTRON_PARSE_HTTP_ENDPOINT"] = "https://integrate.api.nvidia.com/v1/chat/completions"
     os.environ["VLM_CAPTION_ENDPOINT"] = "https://integrate.api.nvidia.com/v1/chat/completions"
     os.environ["VLM_CAPTION_MODEL_NAME"] = "nvidia/nemotron-nano-12b-v2-vl"
     logger.info("Environment variables set.")
@@ -170,23 +170,23 @@ if __name__ == "__main__":
         yolox_graphic_elements_auth,
         yolox_graphic_elements_protocol,
     ) = get_nim_service("yolox_graphic_elements")
-    nemoretriever_parse_grpc, nemoretriever_parse_http, nemoretriever_parse_auth, nemoretriever_parse_protocol = (
-        get_nim_service("nemoretriever_parse")
+    nemotron_parse_grpc, nemotron_parse_http, nemotron_parse_auth, nemotron_parse_protocol = get_nim_service(
+        "nemotron_parse"
     )
     ocr_grpc, ocr_http, ocr_auth, ocr_protocol = get_nim_service("ocr")
 
-    model_name = os.environ.get("NEMORETRIEVER_PARSE_MODEL_NAME", "nvidia/nemoretriever-parse")
+    model_name = os.environ.get("NEMOTRON_PARSE_MODEL_NAME", "nvidia/nemotron-parse")
     pdf_extractor_config = {
         "pdfium_config": {
             "auth_token": yolox_auth,  # All auth tokens are the same for the moment
             "yolox_endpoints": (yolox_grpc, yolox_http),
             "yolox_infer_protocol": yolox_protocol,
         },
-        "nemoretriever_parse_config": {
-            "auth_token": nemoretriever_parse_auth,
-            "nemoretriever_parse_endpoints": (nemoretriever_parse_grpc, nemoretriever_parse_http),
-            "nemoretriever_parse_infer_protocol": nemoretriever_parse_protocol,
-            "nemoretriever_parse_model_name": model_name,
+        "nemotron_parse_config": {
+            "auth_token": nemotron_parse_auth,
+            "nemotron_parse_endpoints": (nemotron_parse_grpc, nemotron_parse_http),
+            "nemotron_parse_infer_protocol": nemotron_parse_protocol,
+            "nemotron_parse_model_name": model_name,
             "yolox_endpoints": (yolox_grpc, yolox_http),
             "yolox_infer_protocol": yolox_protocol,
         },
@@ -0,0 +1,64 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-25, NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# Added this no-op UDF ray stage to the pipeline to help speed up the LLM api calls
+
+"""
+UDF Parallel Stage - A high-concurrency no-op stage for parallel UDF execution.
+
+This stage does nothing except pass messages through, but with high replica count
+it provides a parallel execution pool for UDFs to achieve N-way concurrency.
+"""
+
+import logging
+from typing import Any, Optional
+from pydantic import BaseModel
+import ray
+
+from nv_ingest.framework.orchestration.ray.stages.meta.ray_actor_stage_base import RayActorStage
+from nv_ingest.framework.util.flow_control.udf_intercept import udf_intercept_hook
+from nv_ingest_api.internal.primitives.tracing.tagging import traceable
+from nv_ingest_api.util.exception_handlers.decorators import (
+    nv_ingest_node_failure_try_except,
+)
+
+logger = logging.getLogger(__name__)
+
+
+@ray.remote
+class UDFParallelStage(RayActorStage):
+    """
+    A no-op pass-through stage designed for parallel UDF execution.
+
+    This stage simply returns the input message unchanged, but when configured
+    with multiple replicas, it provides a high-concurrency pool for UDFs to
+    achieve parallel execution without blocking.
+    """
+
+    def __init__(self, config: BaseModel, stage_name: Optional[str] = None) -> None:
+        super().__init__(config, stage_name=stage_name)
+        logger.info(f"UDFParallelStage initialized: {stage_name}")
+
+    @nv_ingest_node_failure_try_except()
+    @traceable()
+    @udf_intercept_hook()
+    def on_data(self, message: Any) -> Any:
+        """
+        Pass-through processing that simply returns the message unchanged.
+
+        The @udf_intercept_hook decorator allows UDFs to target this stage,
+        and multiple replicas provide parallel execution capacity.
+
+        Parameters
+        ----------
+        message : Any
+            The incoming control message.
+
+        Returns
+        -------
+        Any
+            The unmodified control message.
+        """
+        # No-op: just return the message
+        return message
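The new stage above does no work itself; the concurrency gain comes entirely from running many replicas of a pass-through actor. A standalone sketch of that idea using plain Ray actors, not nv-ingest's RayActorStage base class or UDF hooks (which are assumed to handle the real message routing):

import ray

ray.init(ignore_reinit_error=True)


@ray.remote
class PassThrough:
    """Toy stand-in for a no-op pass-through stage."""

    def on_data(self, message):
        # A real UDF would execute here; the stage itself just forwards the message.
        return message


# Four replicas give four-way concurrency for whatever work a hook injects.
replicas = [PassThrough.remote() for _ in range(4)]
futures = [replicas[i % len(replicas)].on_data.remote(f"msg-{i}") for i in range(8)]
print(ray.get(futures))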
@@ -3,6 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import logging
+import os
 from typing import Union, Optional, TextIO
 
 
@@ -23,6 +24,34 @@ from nv_ingest.framework.orchestration.execution.helpers import (
 logger = logging.getLogger(__name__)
 
 
+def _configure_quiet_mode():
+    """
+    Configure environment for quiet/production logging in library mode.
+
+    Sets INGEST_RAY_LOG_LEVEL=PRODUCTION if not already set by user, which:
+    - Sets Ray logging to ERROR level (suppresses INFO/WARNING)
+    - Disables Ray usage stats collection
+    - Disables Ray import warnings
+
+    Also silences other common warnings that are noisy in library mode.
+    """
+    # Only set if user hasn't explicitly configured
+    if "INGEST_RAY_LOG_LEVEL" not in os.environ:
+        os.environ["INGEST_RAY_LOG_LEVEL"] = "PRODUCTION"
+
+    # Silence Ray accelerator env var warning
+    if "RAY_ACCEL_ENV_VAR_OVERRIDE_ON_ZERO" not in os.environ:
+        os.environ["RAY_ACCEL_ENV_VAR_OVERRIDE_ON_ZERO"] = "0"
+
+    # Disable OTEL tracing export errors (no collector expected in library mode)
+    if "OTEL_SDK_DISABLED" not in os.environ:
+        os.environ["OTEL_SDK_DISABLED"] = "true"
+
+    # Set nv-ingest module loggers to WARNING to suppress INFO level startup messages
+    logging.getLogger("nv_ingest").setLevel(logging.WARNING)
+    logging.getLogger("nv_ingest_api").setLevel(logging.WARNING)
+
+
 def run_pipeline(
     pipeline_config: Optional[PipelineConfigSchema] = None,
     block: bool = True,
@@ -32,6 +61,7 @@ def run_pipeline(
     stdout: Optional[TextIO] = None,
     stderr: Optional[TextIO] = None,
     libmode: bool = True,
+    quiet: Optional[bool] = None,
 ) -> Union[RayPipelineInterface, float, RayPipelineSubprocessInterface]:
     """
     Launch and manage a pipeline using configuration.
@@ -65,6 +95,10 @@ def run_pipeline(
     libmode : bool, default=True
        If True and pipeline_config is None, loads the default libmode pipeline configuration.
        If False, requires pipeline_config to be provided.
+    quiet : Optional[bool], default=None
+       If True, configures logging for minimal output (PRODUCTION preset, suppresses
+       INFO-level startup messages). If None, defaults to True when libmode=True.
+       Set to False to see verbose startup logs even in library mode.
 
     Returns
     -------
@@ -83,6 +117,12 @@ def run_pipeline(
     Exception
         Any other exceptions raised during pipeline launch or configuration.
     """
+    # Configure quiet mode for library mode by default (unless explicitly disabled)
+    if quiet is None:
+        quiet = libmode
+    if quiet:
+        _configure_quiet_mode()
+
     # Resolve configuration
     config = resolve_pipeline_config(pipeline_config, libmode)
     overrides = create_runtime_overrides(disable_dynamic_scaling, dynamic_memory_threshold)
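In practice, library-mode callers now get quiet startup by default and can opt back into verbose logs with quiet=False. A minimal sketch, assuming run_pipeline is importable from the pipeline_runners module whose hash changes in the RECORD below (the exact import path is an assumption):

# Hypothetical import path for illustration; adjust to wherever run_pipeline is exposed.
from nv_ingest.framework.orchestration.ray.util.pipeline.pipeline_runners import run_pipeline

# Library mode: quiet defaults to True, so Ray and nv-ingest INFO logs are suppressed.
pipeline = run_pipeline(block=False, libmode=True)

# To keep verbose startup logs instead, pass quiet=False:
# pipeline = run_pipeline(block=False, libmode=True, quiet=False)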
@@ -71,14 +71,14 @@ stages:
        $YOLOX_HTTP_ENDPOINT|"https://ai.api.nvidia.com/v1/cv/nvidia/nemoretriever-page-elements-v2"
      ]
      yolox_infer_protocol: $YOLOX_INFER_PROTOCOL|http
-    nemoretriever_parse_config:
+    nemotron_parse_config:
      auth_token: $NGC_API_KEY|$NVIDIA_API_KEY
-     nemoretriever_parse_endpoints: [
-       $NEMORETRIEVER_PARSE_GRPC_ENDPOINT|"",
-       $NEMORETRIEVER_PARSE_HTTP_ENDPOINT|"https://integrate.api.nvidia.com/v1/chat/completions"
+     nemotron_parse_endpoints: [
+       $NEMOTRON_PARSE_GRPC_ENDPOINT|"",
+       $NEMOTRON_PARSE_HTTP_ENDPOINT|"https://integrate.api.nvidia.com/v1/chat/completions"
      ]
-     nemoretriever_parse_infer_protocol: $NEMORETRIEVER_PARSE_INFER_PROTOCOL|http
-     nemoretriever_parse_model_name: $NEMORETRIEVER_PARSE_MODEL_NAME|"nvidia/nemoretriever-parse"
+     nemotron_parse_infer_protocol: $NEMOTRON_PARSE_INFER_PROTOCOL|http
+     nemotron_parse_model_name: $NEMOTRON_PARSE_MODEL_NAME|"nvidia/nemotron-parse"
      yolox_endpoints: [
        $YOLOX_GRPC_ENDPOINT|"",
        $YOLOX_HTTP_ENDPOINT|"https://ai.api.nvidia.com/v1/cv/nvidia/nemoretriever-page-elements-v2"
@@ -334,7 +334,8 @@ stages:
      api_key: $NGC_API_KEY|$NVIDIA_API_KEY
      endpoint_url: $VLM_CAPTION_ENDPOINT|"http://vlm:8000/v1/chat/completions"
      model_name: $VLM_CAPTION_MODEL_NAME|"nvidia/nemotron-nano-12b-v2-vl"
-     prompt: "Caption the content of this image:"
+     prompt: $VLM_CAPTION_PROMPT|"Caption the content of this image:"
+     system_prompt: $VLM_CAPTION_SYSTEM_PROMPT|"/no_think"
    replicas:
      min_replicas: 0
      max_replicas:
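The $VAR|default syntax in these configs resolves from the environment, so the caption prompt and system prompt are now overridable without editing the pipeline definition. A minimal sketch, mirroring how the test harness above sets endpoints via os.environ (the prompt text itself is just an example value):

import os

# Override the default VLM captioning prompts before the pipeline config is resolved.
os.environ["VLM_CAPTION_PROMPT"] = "Describe the key content of this image in one sentence."
os.environ["VLM_CAPTION_SYSTEM_PROMPT"] = "/no_think"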
@@ -70,14 +70,14 @@ stages:
        $YOLOX_HTTP_ENDPOINT|"http://page-elements:8000/v1/infer",
      ]
      yolox_infer_protocol: $YOLOX_INFER_PROTOCOL|grpc
-    nemoretriever_parse_config:
+    nemotron_parse_config:
      auth_token: $NGC_API_KEY|$NVIDIA_API_KEY
-     nemoretriever_parse_endpoints: [
-       $NEMORETRIEVER_PARSE_GRPC_ENDPOINT|"",
-       $NEMORETRIEVER_PARSE_HTTP_ENDPOINT|"http://nemoretriever-parse:8000/v1/chat/completions",
+     nemotron_parse_endpoints: [
+       $NEMOTRON_PARSE_GRPC_ENDPOINT|"",
+       $NEMOTRON_PARSE_HTTP_ENDPOINT|"http://nemotron-parse:8000/v1/chat/completions",
      ]
-     nemoretriever_parse_infer_protocol: $NEMORETRIEVER_PARSE_INFER_PROTOCOL|http
-     nemoretriever_parse_model_name: $NEMORETRIEVER_PARSE_MODEL_NAME|"nvidia/nemoretriever-parse"
+     nemotron_parse_infer_protocol: $NEMOTRON_PARSE_INFER_PROTOCOL|http
+     nemotron_parse_model_name: $NEMOTRON_PARSE_MODEL_NAME|"nvidia/nemotron-parse"
      yolox_endpoints: [
        $YOLOX_GRPC_ENDPOINT|"page-elements:8001",
        $YOLOX_HTTP_ENDPOINT|"http://page-elements:8000/v1/infer",
@@ -354,7 +354,8 @@ stages:
      api_key: $NGC_API_KEY|$NVIDIA_API_KEY
      model_name: $VLM_CAPTION_MODEL_NAME|"nvidia/nemotron-nano-12b-v2-vl"
      endpoint_url: $VLM_CAPTION_ENDPOINT|"http://vlm:8000/v1/chat/completions"
-     prompt: "Caption the content of this image:"
+     prompt: $VLM_CAPTION_PROMPT|"Caption the content of this image:"
+     system_prompt: $VLM_CAPTION_SYSTEM_PROMPT|"/no_think"
    replicas:
      min_replicas: 0
      max_replicas:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nv-ingest
-Version: 2025.12.10.dev20251210
+Version: 2025.12.18.dev20251218
 Summary: Python module for multimodal document ingestion
 Author-email: Jeremy Dyer <jdyer@nvidia.com>
 License: Apache License
@@ -1,7 +1,7 @@
 nv_ingest/__init__.py,sha256=vJLPeuxiIHqbxXPJSu9qe3MS-GPavbOUExyRq83DxxM,895
 nv_ingest/version.py,sha256=MG7DxlzpnoJI56vqxwzs9WeMAEI3uPhfDiNLs6GN6wI,986
 nv_ingest/api/__init__.py,sha256=ED07QUqwVyJalH0ahhnnjvc2W_in6TpZZ5nJ6NWU9-Y,271
-nv_ingest/api/main.py,sha256=uCCkUNLS1xE9TDYKDOdxEfo_9jQWumpQAPWrxj5m9Go,1706
+nv_ingest/api/main.py,sha256=qXV8YVrC_Jz2dqyirFD4WEKvSTGHsZEFqLMGgHg8TYc,1706
 nv_ingest/api/tracing.py,sha256=NkqMuUiB6ixGU5MYp3TrODsZDQepJ1kbH8JFHsYjuE0,2940
 nv_ingest/api/v1/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
 nv_ingest/api/v1/health.py,sha256=pV-RoVq5y0iBPp0qZoLzd1xKpd0JiHAi0UMyMj99LqU,4740
@@ -17,7 +17,7 @@ nv_ingest/framework/orchestration/execution/helpers.py,sha256=-F8SZh7ISWtzJz6X1O
 nv_ingest/framework/orchestration/execution/options.py,sha256=Ms1t4591EIv4ZrMRdhsCYPgLnMVXJosG3MURCbPXUoA,3983
 nv_ingest/framework/orchestration/process/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
 nv_ingest/framework/orchestration/process/dependent_services.py,sha256=s0j_rsFtCKHFIuvOkBe9NEAkPNPhSYse_ApeHka8gyg,3032
-nv_ingest/framework/orchestration/process/execution.py,sha256=P1kzpYV23e4QYrKw9Td1TCZK3CK1ENVqqnI_axRCqBk,19814
+nv_ingest/framework/orchestration/process/execution.py,sha256=dkGldoudRsFl5wWAbvWnhGBv4ZYOpFOK5fXWncbPFIY,20149
 nv_ingest/framework/orchestration/process/lifecycle.py,sha256=L5NDwnzSMQPGjqJDC8jC75L1YqWey-dtK8N_HgBzb0E,8001
 nv_ingest/framework/orchestration/process/strategies.py,sha256=Q1Q04PPseF775omeS0FoXfK187NiS_bbqTaaJRwzKn8,7972
 nv_ingest/framework/orchestration/process/termination.py,sha256=PAogFeW0FATFS6Mcp_UkZgq_SbWV18RtdZN-0NbComw,5042
@@ -27,7 +27,7 @@ nv_ingest/framework/orchestration/ray/edges/async_queue_edge.py,sha256=PQliU_kyG
 nv_ingest/framework/orchestration/ray/edges/ray_queue_edge.py,sha256=VFii2yxJuikimOxie3edKq5JN06g78AF8bdHSHVX8p8,2677
 nv_ingest/framework/orchestration/ray/edges/threaded_queue_edge.py,sha256=N6NH4KgZJ60e_JkGRcSmfQtX37qtX4TMcavOR-n3heE,2549
 nv_ingest/framework/orchestration/ray/examples/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
-nv_ingest/framework/orchestration/ray/examples/pipeline_test_harness.py,sha256=Bn4rjkO14BwvvUNG_HBCSVXetYk7DKqRRsYHJADWqjc,16455
+nv_ingest/framework/orchestration/ray/examples/pipeline_test_harness.py,sha256=UMvrDMZmOu2FKa4W8oD_kpKDXgxYWSifdMbBGveyFh4,16373
 nv_ingest/framework/orchestration/ray/examples/task_source_harness.py,sha256=Yt7uxThg7s8WuMiaHLKC8r1XAG7QixegfkT-juE5oNw,1953
 nv_ingest/framework/orchestration/ray/examples/task_source_sink_harness.py,sha256=XkvsoIzH5ftXvAZ4ox7mxbx7ESVx6D8Xupcwbqgd52w,3277
 nv_ingest/framework/orchestration/ray/primitives/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
@@ -55,6 +55,7 @@ nv_ingest/framework/orchestration/ray/stages/meta/ray_actor_edge_base.py,sha256=
 nv_ingest/framework/orchestration/ray/stages/meta/ray_actor_sink_stage_base.py,sha256=HQJXIuU7VjiQ6fQjHjbNNmIJX5f30cXFB0CJGixgwVo,3633
 nv_ingest/framework/orchestration/ray/stages/meta/ray_actor_source_stage_base.py,sha256=hP25MLTP2bOEEncrYdxPPqeRyRVbij8aEurR1F1ZmhE,1811
 nv_ingest/framework/orchestration/ray/stages/meta/ray_actor_stage_base.py,sha256=qiB_ZU5_3bXgvE9C2rvnXIS0Alm6M5PWLCeQm8ZxOy4,29812
+nv_ingest/framework/orchestration/ray/stages/meta/udf_parallel_helper.py,sha256=uB9bFJq_RVcGcHlYIwnzYufNbHw6-3zgO5N_EI-yxng,2142
 nv_ingest/framework/orchestration/ray/stages/mutate/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
 nv_ingest/framework/orchestration/ray/stages/mutate/image_dedup.py,sha256=cPLG3ZEqhZkRiSsbL7cbF1zsvOAimd8K5O-qadUR9Mg,3709
 nv_ingest/framework/orchestration/ray/stages/mutate/image_filter.py,sha256=f1CS8x9uifY1FJ_1lUF0fNNMExvM4zBIF012gxnSpqU,3523
@@ -80,7 +81,7 @@ nv_ingest/framework/orchestration/ray/util/__init__.py,sha256=wQSlVx3T14ZgQAt-EP
 nv_ingest/framework/orchestration/ray/util/env_config.py,sha256=GN9msJ_3jdOBIAPnXNxX0ds_BKtHRnRhnYxwzcAU2KY,2386
 nv_ingest/framework/orchestration/ray/util/pipeline/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
 nv_ingest/framework/orchestration/ray/util/pipeline/pid_controller.py,sha256=0dSDVTv3FXjMZ79sQh4i4YEwnqND5iPw8GAeZI0oJO4,47338
-nv_ingest/framework/orchestration/ray/util/pipeline/pipeline_runners.py,sha256=zWi-6-7dfb_3R00uVi3wdYMH1HgeevkBkg47UY8QqUQ,4386
+nv_ingest/framework/orchestration/ray/util/pipeline/pipeline_runners.py,sha256=yisg0iRC5ss__Sg2HfJBQvqq2qJ_bj288go8FSMc2Zs,6020
 nv_ingest/framework/orchestration/ray/util/pipeline/tools.py,sha256=MzxLjElEVb6C5ghfJ7GCp8uqNZeVuzz8xJnxzdQmOsI,8425
 nv_ingest/framework/orchestration/ray/util/system_tools/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
 nv_ingest/framework/orchestration/ray/util/system_tools/memory.py,sha256=ICqY0LLB3hFTZk03iX5yffMSKFH2q_aQomtDVzS_mKw,2228
@@ -111,15 +112,15 @@ nv_ingest/framework/util/service/meta/ingest/ingest_service_meta.py,sha256=QS3uN
 nv_ingest/framework/util/telemetry/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
 nv_ingest/framework/util/telemetry/global_stats.py,sha256=nq65pEEdiwjAfGiqsxG1CeQMC96O3CfQxsZuGFCY-ds,4554
 nv_ingest/pipeline/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
-nv_ingest/pipeline/default_libmode_pipeline_impl.py,sha256=M31VN1xVTdoiNdjaSSPKEZr-yKhXDSwQ1hAVIkpJZLw,16232
-nv_ingest/pipeline/default_pipeline_impl.py,sha256=TW9N9UcgsBL5SG1pxuSdgBIyFpBORskbHCmvJBmIIuw,16770
+nv_ingest/pipeline/default_libmode_pipeline_impl.py,sha256=YYASfM68qNhGL5PcK0Fv72qmRZfE2TtY3cq2Oz-L478,16267
+nv_ingest/pipeline/default_pipeline_impl.py,sha256=6SykgH_LJ8uuE2jrWGIT7OkJP6EjPyB8Ju6LMDu5IK0,16800
 nv_ingest/pipeline/ingest_pipeline.py,sha256=wHAJhqAM2s8nbY-8itVogmSU-yVN4PZONGWcKnhzgfg,17794
 nv_ingest/pipeline/pipeline_schema.py,sha256=rLZZz2It2o2hVNWrZUJU8CarrqRei1fho3ZEMkkoBcg,17940
 nv_ingest/pipeline/config/__init__.py,sha256=wQSlVx3T14ZgQAt-EPzEczQusXVW0W8yynnUaFFGE3s,143
 nv_ingest/pipeline/config/loaders.py,sha256=75Yr9WYO7j7ghvKTnYLfZXQZEH3J3VEZo5J4TunC_Us,7590
 nv_ingest/pipeline/config/replica_resolver.py,sha256=dEwqMXNttfw0QeisTGGkp24785jqzVCDAEFyQIffeGc,9369
-nv_ingest-2025.12.10.dev20251210.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-nv_ingest-2025.12.10.dev20251210.dist-info/METADATA,sha256=4wQaqrQjyq98-3vTXm-gQsgDmgzyrv8RGC0hsCN7jSs,15163
-nv_ingest-2025.12.10.dev20251210.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-nv_ingest-2025.12.10.dev20251210.dist-info/top_level.txt,sha256=sjb0ajIsgn3YgftSjZHlYO0HjYAIIhNuXG_AmywCvaU,10
-nv_ingest-2025.12.10.dev20251210.dist-info/RECORD,,
+nv_ingest-2025.12.18.dev20251218.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+nv_ingest-2025.12.18.dev20251218.dist-info/METADATA,sha256=IKN6vMaeikLmkBrVDi_k-zTAxpcJd1GdvQ98E_M6KIY,15163
+nv_ingest-2025.12.18.dev20251218.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nv_ingest-2025.12.18.dev20251218.dist-info/top_level.txt,sha256=sjb0ajIsgn3YgftSjZHlYO0HjYAIIhNuXG_AmywCvaU,10
+nv_ingest-2025.12.18.dev20251218.dist-info/RECORD,,