triton-model-analyzer 1.48.0-py3-none-any.whl → 1.50.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24) hide show
  1. model_analyzer/config/generate/brute_plus_binary_parameter_search_run_config_generator.py +10 -14
  2. model_analyzer/config/generate/model_profile_spec.py +76 -19
  3. model_analyzer/config/generate/perf_analyzer_config_generator.py +14 -22
  4. model_analyzer/config/generate/quick_plus_concurrency_sweep_run_config_generator.py +2 -16
  5. model_analyzer/config/generate/quick_run_config_generator.py +86 -35
  6. model_analyzer/config/generate/run_config_generator_factory.py +195 -31
  7. model_analyzer/config/input/config_command.py +81 -20
  8. model_analyzer/config/input/config_command_profile.py +41 -14
  9. model_analyzer/config/input/config_defaults.py +3 -15
  10. model_analyzer/perf_analyzer/perf_analyzer.py +4 -16
  11. model_analyzer/perf_analyzer/perf_config.py +23 -15
  12. model_analyzer/plots/detailed_plot.py +41 -54
  13. model_analyzer/record/metrics_manager.py +7 -15
  14. model_analyzer/record/types/gpu_free_memory.py +13 -14
  15. model_analyzer/record/types/gpu_total_memory.py +13 -14
  16. model_analyzer/record/types/gpu_used_memory.py +13 -14
  17. model_analyzer/result/parameter_search.py +10 -17
  18. model_analyzer/result/run_config_measurement.py +29 -24
  19. {triton_model_analyzer-1.48.0.dist-info → triton_model_analyzer-1.50.0.dist-info}/METADATA +1 -1
  20. {triton_model_analyzer-1.48.0.dist-info → triton_model_analyzer-1.50.0.dist-info}/RECORD +24 -24
  21. {triton_model_analyzer-1.48.0.dist-info → triton_model_analyzer-1.50.0.dist-info}/WHEEL +1 -1
  22. {triton_model_analyzer-1.48.0.dist-info → triton_model_analyzer-1.50.0.dist-info}/entry_points.txt +0 -0
  23. {triton_model_analyzer-1.48.0.dist-info → triton_model_analyzer-1.50.0.dist-info}/licenses/LICENSE +0 -0
  24. {triton_model_analyzer-1.48.0.dist-info → triton_model_analyzer-1.50.0.dist-info}/top_level.txt +0 -0
@@ -1,19 +1,9 @@
1
1
  #!/usr/bin/env python3
2
+ # SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ # SPDX-License-Identifier: Apache-2.0
2
4
 
3
- # Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
-
5
+ import logging
6
+ import math
17
7
  from typing import Dict, List
18
8
 
19
9
  from model_analyzer.config.generate.model_profile_spec import ModelProfileSpec
@@ -25,7 +15,7 @@ from model_analyzer.config.input.config_command_profile import ConfigCommandProf
25
15
  from model_analyzer.config.input.objects.config_model_profile_spec import (
26
16
  ConfigModelProfileSpec,
27
17
  )
28
- from model_analyzer.constants import MIN_INITIALIZED, RADIUS
18
+ from model_analyzer.constants import LOGGER_NAME, MIN_INITIALIZED, RADIUS
29
19
  from model_analyzer.device.gpu_device import GPUDevice
30
20
  from model_analyzer.model_analyzer_exceptions import TritonModelAnalyzerException
31
21
  from model_analyzer.result.result_manager import ResultManager
@@ -47,6 +37,8 @@ from .search_config import SearchConfig
47
37
  from .search_dimension import SearchDimension
48
38
  from .search_dimensions import SearchDimensions
49
39
 
40
+ logger = logging.getLogger(LOGGER_NAME)
41
+
50
42
 
51
43
  class RunConfigGeneratorFactory:
52
44
  """
@@ -221,9 +213,7 @@ class RunConfigGeneratorFactory:
221
213
  if model.is_ensemble():
222
214
  continue
223
215
 
224
- dims = RunConfigGeneratorFactory._get_dimensions_for_model(
225
- model.supports_batching()
226
- )
216
+ dims = RunConfigGeneratorFactory._get_dimensions_for_model(model)
227
217
  dimensions.add_dimensions(index, dims)
228
218
  index += 1
229
219
 
@@ -234,14 +224,161 @@ class RunConfigGeneratorFactory:
234
224
  return search_config
235
225
 
236
226
  @staticmethod
237
- def _get_dimensions_for_model(is_batching_supported: bool) -> List[SearchDimension]:
238
- if is_batching_supported:
239
- return RunConfigGeneratorFactory._get_batching_supported_dimensions()
227
+ def _get_dimensions_for_model(model: ModelProfileSpec) -> List[SearchDimension]:
228
+ """
229
+ Create search dimensions for a model, respecting user-specified
230
+ instance_group count lists if provided.
231
+ """
232
+ dims = []
233
+
234
+ # Check if user specified instance_group with a count list
235
+ instance_count_list = RunConfigGeneratorFactory._get_instance_count_list(model)
236
+
237
+ if instance_count_list:
238
+ # User specified a list - create constrained dimension
239
+ dim = RunConfigGeneratorFactory._create_instance_dimension_from_list(
240
+ instance_count_list
241
+ )
242
+ dims.append(dim)
243
+ else:
244
+ # Use default unbounded dimension
245
+ dims.append(
246
+ SearchDimension("instance_count", SearchDimension.DIMENSION_TYPE_LINEAR)
247
+ )
248
+
249
+ # Add max_batch_size dimension if model supports batching
250
+ if model.supports_batching():
251
+ # For now, max_batch_size always uses default exponential dimension
252
+ # Could be extended to support user-specified lists in the future
253
+ dims.insert(
254
+ 0,
255
+ SearchDimension(
256
+ "max_batch_size", SearchDimension.DIMENSION_TYPE_EXPONENTIAL
257
+ ),
258
+ )
259
+
260
+ return dims
261
+
262
+ @staticmethod
263
+ def _get_instance_count_list(model: ModelProfileSpec) -> List[int]:
264
+ """
265
+ Extract instance_group count list from model config parameters if specified.
266
+
267
+ Returns empty list if not specified or not a list.
268
+ """
269
+ model_config_params = model.model_config_parameters()
270
+ if not model_config_params:
271
+ return []
272
+
273
+ if "instance_group" not in model_config_params:
274
+ return []
275
+
276
+ # instance_group structure: [[ {'kind': 'KIND_GPU', 'count': [1, 2, 4]} ]]
277
+ # The outer lists are from config parsing wrapping
278
+ instance_group = model_config_params["instance_group"]
279
+
280
+ if not instance_group or not isinstance(instance_group, list):
281
+ return []
282
+
283
+ # Unwrap the nested structure
284
+ if len(instance_group) > 0 and isinstance(instance_group[0], list):
285
+ instance_group = instance_group[0]
286
+
287
+ if len(instance_group) == 0 or not isinstance(instance_group[0], dict):
288
+ return []
289
+
290
+ count = instance_group[0].get("count")
291
+ if isinstance(count, list) and len(count) > 0:
292
+ return count
293
+
294
+ return []
295
+
296
+ @staticmethod
297
+ def _create_instance_dimension_from_list(
298
+ count_list: List[int],
299
+ ) -> SearchDimension:
300
+ """
301
+ Create a SearchDimension for instance_count from a user-specified list.
302
+
303
+ For lists that are powers of 2 (e.g., [1, 2, 4, 8, 16, 32]),
304
+ uses EXPONENTIAL dimension type with appropriate min/max indexes.
305
+
306
+ For other lists, uses LINEAR dimension type with appropriate min/max.
307
+
308
+ Raises TritonModelAnalyzerException if the list is not compatible with
309
+ either LINEAR or EXPONENTIAL growth patterns.
310
+ """
311
+ if not count_list or len(count_list) == 0:
312
+ raise TritonModelAnalyzerException("Instance count list cannot be empty")
313
+
314
+ # Sort the list to check for patterns
315
+ sorted_counts = sorted(count_list)
316
+
317
+ # Check if it's powers of 2
318
+ if RunConfigGeneratorFactory._is_powers_of_two(sorted_counts):
319
+ # Use EXPONENTIAL: 2^idx gives the value
320
+ # For [1, 2, 4, 8, 16, 32]: min_idx=0 (2^0=1), max_idx=5 (2^5=32)
321
+ min_idx = int(math.log2(sorted_counts[0]))
322
+ max_idx = int(math.log2(sorted_counts[-1]))
323
+
324
+ return SearchDimension(
325
+ "instance_count",
326
+ SearchDimension.DIMENSION_TYPE_EXPONENTIAL,
327
+ min=min_idx,
328
+ max=max_idx,
329
+ )
330
+
331
+ # Check if it's a contiguous linear sequence
332
+ elif RunConfigGeneratorFactory._is_linear_sequence(sorted_counts):
333
+ # Use LINEAR: idx+1 gives the value (LINEAR starts at 1, not 0)
334
+ # For [1, 2, 3, 4]: min_idx=0 (0+1=1), max_idx=3 (3+1=4)
335
+ min_idx = sorted_counts[0] - 1
336
+ max_idx = sorted_counts[-1] - 1
337
+
338
+ return SearchDimension(
339
+ "instance_count",
340
+ SearchDimension.DIMENSION_TYPE_LINEAR,
341
+ min=min_idx,
342
+ max=max_idx,
343
+ )
344
+
240
345
  else:
241
- return RunConfigGeneratorFactory._get_batching_not_supported_dimensions()
346
+ # List is not compatible with LINEAR or EXPONENTIAL
347
+ raise TritonModelAnalyzerException(
348
+ f"Instance count list {count_list} is not compatible with Quick search mode. "
349
+ f"Lists must be either powers of 2 (e.g., [1, 2, 4, 8, 16, 32]) "
350
+ f"or a contiguous sequence (e.g., [1, 2, 3, 4, 5])."
351
+ )
352
+
353
+ @staticmethod
354
+ def _is_powers_of_two(sorted_list: List[int]) -> bool:
355
+ """Check if all values in the list are powers of 2 and form a valid sequence."""
356
+ for val in sorted_list:
357
+ if val <= 0:
358
+ return False
359
+ # Check if val is a power of 2: log2(val) should be an integer
360
+ log_val = math.log2(val)
361
+ if not log_val.is_integer():
362
+ return False
363
+
364
+ return True
365
+
366
+ @staticmethod
367
+ def _is_linear_sequence(sorted_list: List[int]) -> bool:
368
+ """Check if the list is a contiguous linear sequence."""
369
+ if len(sorted_list) < 2:
370
+ return True
371
+
372
+ # Check if values are consecutive: diff should always be 1
373
+ for i in range(1, len(sorted_list)):
374
+ if sorted_list[i] - sorted_list[i - 1] != 1:
375
+ return False
376
+
377
+ return True
242
378
 
243
379
  @staticmethod
244
380
  def _get_batching_supported_dimensions() -> List[SearchDimension]:
381
+ """Legacy method - kept for backward compatibility."""
245
382
  return [
246
383
  SearchDimension(
247
384
  f"max_batch_size", SearchDimension.DIMENSION_TYPE_EXPONENTIAL
@@ -251,6 +388,7 @@ class RunConfigGeneratorFactory:
251
388
 
252
389
  @staticmethod
253
390
  def _get_batching_not_supported_dimensions() -> List[SearchDimension]:
391
+ """Legacy method - kept for backward compatibility."""
254
392
  return [
255
393
  SearchDimension(f"instance_count", SearchDimension.DIMENSION_TYPE_LINEAR)
256
394
  ]
@@ -306,24 +444,50 @@ class RunConfigGeneratorFactory:
306
444
  gpus: List[GPUDevice],
307
445
  ) -> List[ModelProfileSpec]:
308
446
  """
309
- Creates a list of Ensemble composing model configs based on the model
447
+ Creates a list of Ensemble composing model configs based on the model.
448
+
449
+ If user specified ensemble_composing_models configs, use those for matching models.
450
+ Otherwise, use auto-discovered configs from ensemble_scheduling.
310
451
  """
311
452
  model_config = ModelConfig.create_from_profile_spec(model, config, client, gpus)
312
453
 
313
454
  if not model_config.is_ensemble():
314
455
  return []
315
456
 
457
+ # Auto-discover composing model names from ensemble_scheduling
316
458
  ensemble_composing_model_names = model_config.get_ensemble_composing_models()
459
+ if ensemble_composing_model_names is None:
460
+ return []
317
461
 
318
- ensemble_composing_model_specs = (
319
- ConfigModelProfileSpec.model_list_to_config_model_profile_spec(
462
+ # Check if user provided configs for any of these models
463
+ user_provided_configs = {}
464
+ if config.ensemble_composing_models is not None:
465
+ for user_spec in config.ensemble_composing_models:
466
+ user_provided_configs[user_spec.model_name()] = user_spec
467
+
468
+ # Create ModelProfileSpecs, using user configs when available
469
+ ensemble_composing_model_configs = []
470
+ for model_name in ensemble_composing_model_names:
471
+ if model_name in user_provided_configs:
472
+ # Use user-provided config with model_config_parameters
473
+ model_spec = user_provided_configs[model_name]
474
+ else:
475
+ # Use auto-discovered config (just model name, no parameters)
476
+ model_spec = ConfigModelProfileSpec(model_name)
477
+
478
+ mps = ModelProfileSpec(model_spec, config, client, gpus)
479
+ ensemble_composing_model_configs.append(mps)
480
+
481
+ # Warn if user specified models that aren't in the ensemble
482
+ if user_provided_configs:
483
+ unused_models = set(user_provided_configs.keys()) - set(
320
484
  ensemble_composing_model_names
321
485
  )
322
- )
323
-
324
- ensemble_composing_model_configs = [
325
- ModelProfileSpec(ensemble_composing_model_spec, config, client, gpus)
326
- for ensemble_composing_model_spec in ensemble_composing_model_specs
327
- ]
486
+ if unused_models:
487
+ logger.warning(
488
+ f"The following models in ensemble_composing_models were not found "
489
+ f"in the ensemble '{model.model_name()}' and will be ignored: "
490
+ f"{', '.join(sorted(unused_models))}"
491
+ )
328
492
 
329
493
  return ensemble_composing_model_configs
@@ -1,18 +1,6 @@
1
1
  #!/usr/bin/env python3
2
-
3
- # Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
2
+ # SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ # SPDX-License-Identifier: Apache-2.0
16
4
 
17
5
  from argparse import Namespace
18
6
  from copy import deepcopy
@@ -251,8 +239,18 @@ class ConfigCommand:
251
239
  ):
252
240
  return
253
241
 
242
+ # Get composing model names for validation
243
+ bls_composing = (
244
+ self._get_config_value("bls_composing_models", args, yaml_config) or []
245
+ )
246
+ cpu_only_composing = (
247
+ self._get_config_value("cpu_only_composing_models", args, yaml_config) or []
248
+ )
249
+
254
250
  self._check_per_model_parameters(profile_models)
255
- self._check_per_model_model_config_parameters(profile_models)
251
+ self._check_per_model_model_config_parameters(
252
+ profile_models, bls_composing, cpu_only_composing
253
+ )
256
254
 
257
255
  def _check_per_model_parameters(self, profile_models: Dict) -> None:
258
256
  for model in profile_models.values():
@@ -268,23 +266,81 @@ class ConfigCommand:
268
266
  "\nPlease use brute search mode or remove concurrency/batch sizes list."
269
267
  )
270
268
 
271
- def _check_per_model_model_config_parameters(self, profile_models: Dict) -> None:
272
- for model in profile_models.values():
269
+ def _check_per_model_model_config_parameters(
270
+ self, profile_models: Dict, bls_composing: List, cpu_only_composing: List
271
+ ) -> None:
272
+ for model_name, model in profile_models.items():
273
273
  if not "model_config_parameters" in model:
274
274
  continue
275
275
 
276
+ # Check if this is a composing model
277
+ is_composing = False
278
+ if bls_composing:
279
+ # bls_composing might be a list of dicts or list of strings
280
+ if isinstance(bls_composing, list):
281
+ is_composing = any(
282
+ (isinstance(m, dict) and m.get("model_name") == model_name)
283
+ or (isinstance(m, str) and m == model_name)
284
+ for m in bls_composing
285
+ )
286
+ if (
287
+ not is_composing
288
+ and cpu_only_composing
289
+ and model_name in cpu_only_composing
290
+ ):
291
+ is_composing = True
292
+
293
+ # Composing models are allowed to have these parameters with ranges
294
+ if is_composing:
295
+ continue
296
+
276
297
  if "max_batch_size" in model["model_config_parameters"]:
277
298
  raise TritonModelAnalyzerException(
278
- f"\nProfiling of models in quick search mode is not supported with lists of max batch sizes."
299
+ f"\nProfiling of top-level models in quick search mode is not supported with lists of max batch sizes."
279
300
  "\nPlease use brute search mode or remove max batch size list."
301
+ "\nNote: Composing models in ensembles/BLS can have max_batch_size ranges in Quick mode."
280
302
  )
281
303
 
282
304
  if "instance_group" in model["model_config_parameters"]:
283
305
  raise TritonModelAnalyzerException(
284
- f"\nProfiling of models in quick search mode is not supported with instance group as a model config parameter"
306
+ f"\nProfiling of top-level models in quick search mode is not supported with instance group as a model config parameter."
285
307
  "\nPlease use brute search mode or remove instance_group from 'model_config_parameters'."
308
+ "\nNote: Composing models in ensembles/BLS can have instance_group with count ranges in Quick mode."
286
309
  )
287
310
 
311
+ def _is_composing_model(self, model_name: str, config: Dict) -> bool:
312
+ """
313
+ Determine if a model is a composing model by checking:
314
+ 1. If it's in bls_composing_models list
315
+ 2. If it's in ensemble_composing_models list
316
+ 3. If it's in cpu_only_composing_models list
317
+
318
+ Note: We cannot check ensemble_scheduling at this stage because
319
+ we haven't loaded the model configs from the repository yet.
320
+ Users must explicitly list ensemble composing models in ensemble_composing_models
321
+ to enable parameter ranges for them.
322
+ """
323
+ if "bls_composing_models" in config:
324
+ bls_composing = config["bls_composing_models"].value()
325
+ if bls_composing and any(
326
+ m.model_name() == model_name for m in bls_composing
327
+ ):
328
+ return True
329
+
330
+ if "ensemble_composing_models" in config:
331
+ ensemble_composing = config["ensemble_composing_models"].value()
332
+ if ensemble_composing and any(
333
+ m.model_name() == model_name for m in ensemble_composing
334
+ ):
335
+ return True
336
+
337
+ if "cpu_only_composing_models" in config:
338
+ cpu_only_composing = config["cpu_only_composing_models"].value()
339
+ if cpu_only_composing and model_name in cpu_only_composing:
340
+ return True
341
+
342
+ return False
343
+
288
344
  def _check_quick_search_model_config_parameters_combinations(self) -> None:
289
345
  config = self.get_config()
290
346
  if not "profile_models" in config:
@@ -295,13 +351,18 @@ class ConfigCommand:
295
351
 
296
352
  profile_models = config["profile_models"].value()
297
353
  for model in profile_models:
354
+ # Composing models are allowed to have parameter ranges in Quick mode
355
+ if self._is_composing_model(model.model_name(), config):
356
+ continue
357
+
298
358
  model_config_params = deepcopy(model.model_config_parameters())
299
359
  if model_config_params:
300
360
  if len(GeneratorUtils.generate_combinations(model_config_params)) > 1:
301
361
  raise TritonModelAnalyzerException(
302
- f"\nProfiling of models in quick search mode is not supported for the specified model config parameters, "
362
+ f"\nProfiling of top-level models in quick search mode is not supported for the specified model config parameters, "
303
363
  f"as more than one combination of parameters can be generated."
304
364
  f"\nPlease use brute search mode to profile or remove the model config parameters specified."
365
+ f"\nNote: Composing models in ensembles/BLS can have parameter ranges in Quick mode."
305
366
  )
306
367
 
307
368
  def _check_bls_no_brute_search(
@@ -1,18 +1,6 @@
1
1
  #!/usr/bin/env python3
2
-
3
- # Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
2
+ # SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ # SPDX-License-Identifier: Apache-2.0
16
4
 
17
5
  import argparse
18
6
  import logging
@@ -803,6 +791,39 @@ class ConfigCommandProfile(ConfigCommand):
803
791
  description="List of the models to be profiled",
804
792
  )
805
793
  )
794
+ self._add_config(
795
+ ConfigField(
796
+ "ensemble_composing_models",
797
+ flags=["--ensemble-composing-models"],
798
+ field_type=ConfigUnion(
799
+ [
800
+ profile_model_scheme,
801
+ ConfigListGeneric(
802
+ ConfigUnion(
803
+ [
804
+ profile_model_scheme,
805
+ ConfigPrimitive(
806
+ str,
807
+ output_mapper=ConfigModelProfileSpec.model_str_to_config_model_profile_spec,
808
+ ),
809
+ ]
810
+ ),
811
+ required=True,
812
+ output_mapper=ConfigModelProfileSpec.model_mixed_to_config_model_profile_spec,
813
+ ),
814
+ ConfigListString(
815
+ output_mapper=ConfigModelProfileSpec.model_list_to_config_model_profile_spec
816
+ ),
817
+ ],
818
+ required=True,
819
+ ),
820
+ default_value=[],
821
+ description="List of ensemble composing models with optional model config parameters. "
822
+ "Composing models are auto-discovered from ensemble_scheduling. Use this option to specify "
823
+ "configurations (e.g., instance_group count ranges) for any auto-discovered models. "
824
+ "Only models found in ensemble_scheduling will be profiled; others will be ignored with a warning.",
825
+ )
826
+ )
806
827
  self._add_config(
807
828
  ConfigField(
808
829
  "cpu_only_composing_models",
@@ -1701,9 +1722,15 @@ class ConfigCommandProfile(ConfigCommand):
1701
1722
  new_profile_models[model.model_name()] = new_model
1702
1723
 
1703
1724
  # deepcopy is necessary, else it gets overwritten when updating profile_models
1725
+ # Both bls_composing_models and ensemble_composing_models share the same
1726
+ # profile_model_scheme ConfigObject, so updating profile_models would
1727
+ # overwrite their values.
1704
1728
  self._fields["bls_composing_models"] = deepcopy(
1705
1729
  self._fields["bls_composing_models"]
1706
1730
  )
1731
+ self._fields["ensemble_composing_models"] = deepcopy(
1732
+ self._fields["ensemble_composing_models"]
1733
+ )
1707
1734
  self._fields["profile_models"].set_value(new_profile_models)
1708
1735
 
1709
1736
  def _using_request_rate(self) -> bool:
@@ -1,18 +1,6 @@
1
1
  #!/usr/bin/env python3
2
-
3
- # Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
2
+ # SPDX-FileCopyrightText: Copyright (c) 2021-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ # SPDX-License-Identifier: Apache-2.0
16
4
 
17
5
  import os
18
6
 
@@ -64,7 +52,7 @@ DEFAULT_REQUEST_RATE_SEARCH_ENABLE = False
64
52
  DEFAULT_CONCURRENCY_SWEEP_DISABLE = False
65
53
  DEFAULT_DCGM_DISABLE = False
66
54
  DEFAULT_TRITON_LAUNCH_MODE = "local"
67
- DEFAULT_TRITON_DOCKER_IMAGE = "nvcr.io/nvidia/tritonserver:25.11-py3"
55
+ DEFAULT_TRITON_DOCKER_IMAGE = "nvcr.io/nvidia/tritonserver:26.01-py3"
68
56
  DEFAULT_TRITON_HTTP_ENDPOINT = "localhost:8000"
69
57
  DEFAULT_TRITON_GRPC_ENDPOINT = "localhost:8001"
70
58
  DEFAULT_TRITON_METRICS_URL = "http://localhost:8002/metrics"
@@ -1,18 +1,6 @@
1
1
  #!/usr/bin/env python3
2
-
3
- # Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
2
+ # SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ # SPDX-License-Identifier: Apache-2.0
16
4
 
17
5
  import csv
18
6
  import glob
@@ -432,8 +420,8 @@ class PerfAnalyzer:
432
420
  return cmd
433
421
 
434
422
  def _get_single_model_cmd(self, index):
435
- if self._model_type == "LLM":
436
- cmd = ["genai-perf", "-m", self._config.models_name()]
423
+ if self._model_type.lower() == "llm":
424
+ cmd = ["genai-perf", "profile", "-m", self._config.models_name()]
437
425
  cmd += self._get_genai_perf_cli_command(index).replace("=", " ").split()
438
426
  cmd += ["--"]
439
427
  cmd += (
@@ -1,18 +1,6 @@
1
1
  #!/usr/bin/env python3
2
-
3
- # Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
2
+ # SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ # SPDX-License-Identifier: Apache-2.0
16
4
 
17
5
  from typing import List
18
6
 
@@ -98,6 +86,13 @@ class PerfAnalyzerConfig:
98
86
  "collect-metrics",
99
87
  ]
100
88
 
89
+ # Only one of these args can be sent to PA, as each one controls the inference load in a different way
90
+ inference_load_args = [
91
+ "concurrency-range",
92
+ "request-rate-range",
93
+ "request-intervals",
94
+ ]
95
+
101
96
  def __init__(self):
102
97
  """
103
98
  Construct a PerfAnalyzerConfig
@@ -108,7 +103,9 @@ class PerfAnalyzerConfig:
108
103
  self._options = {
109
104
  "-m": None,
110
105
  "-x": None,
111
- "-b": None,
106
+ # Default to batch size of 1. This would be handled by PA if unspecified,
107
+ # but we want to be explicit so we can properly print/track values
108
+ "-b": 1,
112
109
  "-u": None,
113
110
  "-i": None,
114
111
  "-f": None,
@@ -160,6 +157,16 @@ class PerfAnalyzerConfig:
160
157
 
161
158
  return cls.additive_args[:]
162
159
 
160
+ @classmethod
161
+ def get_inference_load_args(cls):
162
+ """
163
+ Returns
164
+ -------
165
+ list of str
166
+ The Perf Analyzer args that control the inference load
167
+ """
168
+ return cls.inference_load_args
169
+
163
170
  def update_config(self, params=None):
164
171
  """
165
172
  Allows setting values from a params dict
@@ -275,6 +282,7 @@ class PerfAnalyzerConfig:
275
282
  "batch-size": self._options["-b"],
276
283
  "concurrency-range": self._args["concurrency-range"],
277
284
  "request-rate-range": self._args["request-rate-range"],
285
+ "request-intervals": self._args["request-intervals"],
278
286
  }
279
287
 
280
288
  @classmethod