lightning-sdk 2025.8.19.post0__py3-none-any.whl → 2025.8.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. lightning_sdk/__init__.py +1 -1
  2. lightning_sdk/api/llm_api.py +6 -2
  3. lightning_sdk/api/studio_api.py +99 -0
  4. lightning_sdk/cli/legacy/create.py +9 -11
  5. lightning_sdk/cli/legacy/start.py +1 -0
  6. lightning_sdk/cli/legacy/switch.py +1 -0
  7. lightning_sdk/cli/studio/start.py +1 -0
  8. lightning_sdk/cli/studio/switch.py +1 -0
  9. lightning_sdk/lightning_cloud/openapi/__init__.py +1 -0
  10. lightning_sdk/lightning_cloud/openapi/api/billing_service_api.py +85 -0
  11. lightning_sdk/lightning_cloud/openapi/models/__init__.py +1 -0
  12. lightning_sdk/lightning_cloud/openapi/models/assistant_id_conversations_body.py +15 -15
  13. lightning_sdk/lightning_cloud/openapi/models/v1_pod_metrics.py +157 -1
  14. lightning_sdk/lightning_cloud/openapi/models/v1_project_cluster_binding.py +27 -1
  15. lightning_sdk/lightning_cloud/openapi/models/v1_quote_annual_upsell_response.py +201 -0
  16. lightning_sdk/lightning_cloud/openapi/models/v1_user_features.py +1 -27
  17. lightning_sdk/llm/llm.py +2 -2
  18. lightning_sdk/studio.py +39 -6
  19. lightning_sdk/utils/progress.py +284 -0
  20. {lightning_sdk-2025.8.19.post0.dist-info → lightning_sdk-2025.8.21.dist-info}/METADATA +1 -1
  21. {lightning_sdk-2025.8.19.post0.dist-info → lightning_sdk-2025.8.21.dist-info}/RECORD +25 -23
  22. {lightning_sdk-2025.8.19.post0.dist-info → lightning_sdk-2025.8.21.dist-info}/LICENSE +0 -0
  23. {lightning_sdk-2025.8.19.post0.dist-info → lightning_sdk-2025.8.21.dist-info}/WHEEL +0 -0
  24. {lightning_sdk-2025.8.19.post0.dist-info → lightning_sdk-2025.8.21.dist-info}/entry_points.txt +0 -0
  25. {lightning_sdk-2025.8.19.post0.dist-info → lightning_sdk-2025.8.21.dist-info}/top_level.txt +0 -0
lightning_sdk/__init__.py CHANGED
@@ -32,6 +32,6 @@ __all__ = [
32
32
  "User",
33
33
  ]
34
34
 
35
- __version__ = "2025.08.19.post0"
35
+ __version__ = "2025.08.21"
36
36
  _check_version_and_prompt_upgrade(__version__)
37
37
  _set_tqdm_envvars_noninteractive()
@@ -146,7 +146,6 @@ class LLMApi:
146
146
  {"contentType": "text", "parts": [prompt]},
147
147
  ],
148
148
  },
149
- "max_tokens": max_completion_tokens,
150
149
  "conversation_id": conversation_id,
151
150
  "billing_project_id": billing_project_id,
152
151
  "name": name,
@@ -159,6 +158,9 @@ class LLMApi:
159
158
  "parent_message_id": kwargs.get("parent_message_id", ""),
160
159
  "tools": tools,
161
160
  }
161
+ if max_completion_tokens is not None:
162
+ body["max_completion_tokens"] = max_completion_tokens
163
+
162
164
  if images:
163
165
  for image in images:
164
166
  url = image
@@ -203,7 +205,6 @@ class LLMApi:
203
205
  {"contentType": "text", "parts": [prompt]},
204
206
  ],
205
207
  },
206
- "max_completion_tokens": max_completion_tokens,
207
208
  "conversation_id": conversation_id,
208
209
  "billing_project_id": billing_project_id,
209
210
  "name": name,
@@ -216,6 +217,9 @@ class LLMApi:
216
217
  "parent_message_id": kwargs.get("parent_message_id", ""),
217
218
  "sent_at": datetime.datetime.now(datetime.timezone.utc).isoformat(timespec="microseconds"),
218
219
  }
220
+ if max_completion_tokens is not None:
221
+ body["max_completion_tokens"] = max_completion_tokens
222
+
219
223
  if images:
220
224
  for image in images:
221
225
  url = image
@@ -205,6 +205,32 @@ class StudioApi:
205
205
  instance_id = code_status.in_use.cloud_space_instance_id
206
206
  print(f"Studio started | {teamspace_id=} {studio_id=} {instance_id=}")
207
207
 
208
+ def start_studio_async(
209
+ self,
210
+ studio_id: str,
211
+ teamspace_id: str,
212
+ machine: Union[Machine, str],
213
+ interruptible: bool = False,
214
+ max_runtime: Optional[int] = None,
215
+ ) -> None:
216
+ """Start an existing Studio without blocking."""
217
+ # need to go via kwargs for typing compatibility since autogenerated apis accept None but aren't typed with None
218
+ optional_kwargs_compute_body = {}
219
+
220
+ if max_runtime is not None:
221
+ optional_kwargs_compute_body["requested_run_duration_seconds"] = str(max_runtime)
222
+ self._client.cloud_space_service_start_cloud_space_instance(
223
+ IdStartBody(
224
+ compute_config=V1UserRequestedComputeConfig(
225
+ name=_machine_to_compute_name(machine),
226
+ spot=interruptible,
227
+ **optional_kwargs_compute_body,
228
+ )
229
+ ),
230
+ teamspace_id,
231
+ studio_id,
232
+ )
233
+
208
234
  def stop_studio(self, studio_id: str, teamspace_id: str) -> None:
209
235
  """Stop an existing Studio."""
210
236
  self.stop_keeping_alive(teamspace_id=teamspace_id, studio_id=studio_id)
@@ -289,6 +315,79 @@ class StudioApi:
289
315
  break
290
316
  time.sleep(1)
291
317
 
318
+ def switch_studio_machine_with_progress(
319
+ self,
320
+ studio_id: str,
321
+ teamspace_id: str,
322
+ machine: Union[Machine, str],
323
+ interruptible: bool,
324
+ progress: Any, # StudioProgressTracker - avoid circular import
325
+ ) -> None:
326
+ """Switches given Studio to a new machine type with progress tracking."""
327
+ progress.update_progress(10, "Requesting machine switch...")
328
+
329
+ self._request_switch(
330
+ studio_id=studio_id, teamspace_id=teamspace_id, machine=machine, interruptible=interruptible
331
+ )
332
+
333
+ progress.update_progress(20, "Waiting for machine allocation...")
334
+
335
+ # Wait until it's time to switch
336
+ requested_was_found = False
337
+ startup_status = None
338
+ base_progress = 20
339
+ max_wait_progress = 60
340
+ wait_counter = 0
341
+
342
+ while True:
343
+ status = self.get_studio_status(studio_id, teamspace_id)
344
+ requested_machine = status.requested
345
+
346
+ if requested_machine is not None:
347
+ requested_was_found = True
348
+ startup_status = requested_machine.startup_status
349
+
350
+ # if the requested machine was found in the past, use the in_use status instead.
351
+ # it might be that it either was cancelled or it actually is ready.
352
+ # Either way, since we're actually blocking below for the in use startup status
353
+ # it's safe to switch at this point
354
+ elif requested_was_found:
355
+ in_use_machine = status.in_use
356
+ if in_use_machine is not None:
357
+ startup_status = in_use_machine.startup_status
358
+
359
+ if startup_status and startup_status.initial_restore_finished:
360
+ break
361
+
362
+ # Update progress gradually while waiting
363
+ wait_counter += 1
364
+ current_progress = min(base_progress + (wait_counter * 2), max_wait_progress)
365
+ progress.update_progress(current_progress, "Allocating new machine...")
366
+ time.sleep(1)
367
+
368
+ progress.update_progress(70, "Starting machine switch...")
369
+ self._client.cloud_space_service_switch_cloud_space_instance(teamspace_id, studio_id)
370
+
371
+ progress.update_progress(80, "Configuring new machine...")
372
+
373
+ # Wait until the new machine is ready to use
374
+ switch_counter = 0
375
+ while True:
376
+ in_use = self.get_studio_status(studio_id, teamspace_id).in_use
377
+ if in_use is None:
378
+ continue
379
+ startup_status = in_use.startup_status
380
+ if startup_status and startup_status.top_up_restore_finished:
381
+ break
382
+
383
+ # Update progress while waiting for machine to be ready
384
+ switch_counter += 1
385
+ current_progress = min(80 + switch_counter, 95)
386
+ progress.update_progress(current_progress, "Finalizing machine setup...")
387
+ time.sleep(1)
388
+
389
+ progress.complete("Machine switch completed successfully")
390
+
292
391
  def get_machine(self, studio_id: str, teamspace_id: str, cloud_account_id: str, org_id: str) -> Machine:
293
392
  """Get the current machine type the given Studio is running on."""
294
393
  response: V1CloudSpaceInstanceConfig = self._client.cloud_space_service_get_cloud_space_instance_config(
@@ -7,10 +7,8 @@ import click
7
7
  from rich.console import Console
8
8
 
9
9
  from lightning_sdk import Machine, Studio
10
- from lightning_sdk.api.cloud_account_api import CloudAccountApi
11
10
  from lightning_sdk.cli.legacy.teamspace_menu import _TeamspacesMenu
12
11
  from lightning_sdk.machine import CloudProvider
13
- from lightning_sdk.utils.resolve import _resolve_deprecated_provider
14
12
 
15
13
  _MACHINE_VALUES = tuple(
16
14
  [machine.name for machine in Machine.__dict__.values() if isinstance(machine, Machine) and machine._include_in_cli]
@@ -83,14 +81,6 @@ def studio(
83
81
  menu = _TeamspacesMenu()
84
82
  teamspace_resolved = menu._resolve_teamspace(teamspace)
85
83
 
86
- cloud_provider = str(_resolve_deprecated_provider(cloud_provider, provider))
87
-
88
- if cloud_provider is not None:
89
- cloud_account_api = CloudAccountApi()
90
- cloud_account = cloud_account_api.resolve_cloud_account(
91
- teamspace_resolved.id, cloud_account, cloud_provider, teamspace_resolved.default_cloud_account
92
- )
93
-
94
84
  # default cloud account to current studios cloud account if run from studio
95
85
  # else it will fall back to teamspace default in the backend
96
86
  if cloud_account is None:
@@ -107,11 +97,19 @@ def studio(
107
97
  console.print(f"Studio with name {name} already exists. Using {new_name} instead.")
108
98
  name = new_name
109
99
 
110
- studio = Studio(name=name, teamspace=teamspace_resolved, cloud_account=cloud_account, create_ok=True)
100
+ studio = Studio(
101
+ name=name,
102
+ teamspace=teamspace_resolved,
103
+ cloud_account=cloud_account,
104
+ create_ok=True,
105
+ cloud_provider=cloud_provider,
106
+ provider=provider,
107
+ )
111
108
 
112
109
  console.print(f"Created Studio {studio.name}.")
113
110
 
114
111
  if start is not None:
115
112
  start_machine = getattr(Machine, start, start)
113
+ Studio.show_progress = True
116
114
  studio.start(start_machine)
117
115
  console.print(f"Started Studio {studio.name} on machine {start}")
@@ -103,4 +103,5 @@ def studio(
103
103
  except KeyError:
104
104
  resolved_machine = machine
105
105
 
106
+ Studio.show_progress = True
106
107
  studio.start(resolved_machine)
@@ -59,4 +59,5 @@ def studio(name: str, teamspace: Optional[str] = None, machine: str = "CPU") ->
59
59
  except KeyError:
60
60
  resolved_machine = machine
61
61
 
62
+ Studio.show_progress = True
62
63
  studio.switch_machine(resolved_machine)
@@ -77,5 +77,6 @@ def start_studio(
77
77
  raise ValueError(f"Could not start Studio: '{studio_name}'. Does the Studio exist?") from None
78
78
  raise ValueError(f"Could not start Studio: '{studio_name}'. Please provide a Studio name") from None
79
79
 
80
+ Studio.show_progress = True
80
81
  studio.start(machine, interruptible=interruptible)
81
82
  click.echo(f"Studio '{studio.name}' started successfully")
@@ -47,6 +47,7 @@ def switch_studio(
47
47
  raise ValueError(f"Could not switch Studio: '{studio_name}'. Please provide a Studio name") from None
48
48
 
49
49
  resolved_machine = Machine.from_str(machine)
50
+ Studio.show_progress = True
50
51
  studio.switch_machine(resolved_machine, interruptible=interruptible)
51
52
 
52
53
  click.echo(f"Studio '{studio.name}' switched to machine '{resolved_machine}' successfully")
@@ -853,6 +853,7 @@ from lightning_sdk.lightning_cloud.openapi.models.v1_quest import V1Quest
853
853
  from lightning_sdk.lightning_cloud.openapi.models.v1_quest_status import V1QuestStatus
854
854
  from lightning_sdk.lightning_cloud.openapi.models.v1_queue_server_type import V1QueueServerType
855
855
  from lightning_sdk.lightning_cloud.openapi.models.v1_quotas import V1Quotas
856
+ from lightning_sdk.lightning_cloud.openapi.models.v1_quote_annual_upsell_response import V1QuoteAnnualUpsellResponse
856
857
  from lightning_sdk.lightning_cloud.openapi.models.v1_quote_subscription_response import V1QuoteSubscriptionResponse
857
858
  from lightning_sdk.lightning_cloud.openapi.models.v1_r2_data_connection import V1R2DataConnection
858
859
  from lightning_sdk.lightning_cloud.openapi.models.v1_refresh_index_response import V1RefreshIndexResponse
@@ -1356,6 +1356,91 @@ class BillingServiceApi(object):
1356
1356
  _request_timeout=params.get('_request_timeout'),
1357
1357
  collection_formats=collection_formats)
1358
1358
 
1359
+ def billing_service_quote_annual_upsell(self, **kwargs) -> 'V1QuoteAnnualUpsellResponse': # noqa: E501
1360
+ """billing_service_quote_annual_upsell # noqa: E501
1361
+
1362
+ This method makes a synchronous HTTP request by default. To make an
1363
+ asynchronous HTTP request, please pass async_req=True
1364
+ >>> thread = api.billing_service_quote_annual_upsell(async_req=True)
1365
+ >>> result = thread.get()
1366
+
1367
+ :param async_req bool
1368
+ :return: V1QuoteAnnualUpsellResponse
1369
+ If the method is called asynchronously,
1370
+ returns the request thread.
1371
+ """
1372
+ kwargs['_return_http_data_only'] = True
1373
+ if kwargs.get('async_req'):
1374
+ return self.billing_service_quote_annual_upsell_with_http_info(**kwargs) # noqa: E501
1375
+ else:
1376
+ (data) = self.billing_service_quote_annual_upsell_with_http_info(**kwargs) # noqa: E501
1377
+ return data
1378
+
1379
    def billing_service_quote_annual_upsell_with_http_info(self, **kwargs) -> 'V1QuoteAnnualUpsellResponse':  # noqa: E501
        """billing_service_quote_annual_upsell # noqa: E501

        Low-level variant: performs GET /v1/billing/annual-upsell via the
        generated API client and returns the deserialized response.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.billing_service_quote_annual_upsell_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: V1QuoteAnnualUpsellResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # The endpoint takes no parameters of its own; only the generic
        # request-control kwargs are accepted.
        all_params = [] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject anything that is not a recognized request-control kwarg.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method billing_service_quote_annual_upsell" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/v1/billing/annual-upsell', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1QuoteAnnualUpsellResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
1443
+
1359
1444
  def billing_service_quote_subscription(self, **kwargs) -> 'V1QuoteSubscriptionResponse': # noqa: E501
1360
1445
  """billing_service_quote_subscription # noqa: E501
1361
1446
 
@@ -805,6 +805,7 @@ from lightning_sdk.lightning_cloud.openapi.models.v1_quest import V1Quest
805
805
  from lightning_sdk.lightning_cloud.openapi.models.v1_quest_status import V1QuestStatus
806
806
  from lightning_sdk.lightning_cloud.openapi.models.v1_queue_server_type import V1QueueServerType
807
807
  from lightning_sdk.lightning_cloud.openapi.models.v1_quotas import V1Quotas
808
+ from lightning_sdk.lightning_cloud.openapi.models.v1_quote_annual_upsell_response import V1QuoteAnnualUpsellResponse
808
809
  from lightning_sdk.lightning_cloud.openapi.models.v1_quote_subscription_response import V1QuoteSubscriptionResponse
809
810
  from lightning_sdk.lightning_cloud.openapi.models.v1_r2_data_connection import V1R2DataConnection
810
811
  from lightning_sdk.lightning_cloud.openapi.models.v1_refresh_index_response import V1RefreshIndexResponse
@@ -46,7 +46,7 @@ class AssistantIdConversationsBody(object):
46
46
  'conversation_id': 'str',
47
47
  'ephemeral': 'bool',
48
48
  'internal_conversation': 'bool',
49
- 'max_tokens': 'str',
49
+ 'max_completion_tokens': 'str',
50
50
  'message': 'V1Message',
51
51
  'metadata': 'dict(str, str)',
52
52
  'name': 'str',
@@ -66,7 +66,7 @@ class AssistantIdConversationsBody(object):
66
66
  'conversation_id': 'conversationId',
67
67
  'ephemeral': 'ephemeral',
68
68
  'internal_conversation': 'internalConversation',
69
- 'max_tokens': 'maxTokens',
69
+ 'max_completion_tokens': 'maxCompletionTokens',
70
70
  'message': 'message',
71
71
  'metadata': 'metadata',
72
72
  'name': 'name',
@@ -80,14 +80,14 @@ class AssistantIdConversationsBody(object):
80
80
  'tools': 'tools'
81
81
  }
82
82
 
83
- def __init__(self, auto_name: 'bool' =None, billing_project_id: 'str' =None, conversation_id: 'str' =None, ephemeral: 'bool' =None, internal_conversation: 'bool' =None, max_tokens: 'str' =None, message: 'V1Message' =None, metadata: 'dict(str, str)' =None, name: 'str' =None, parent_conversation_id: 'str' =None, parent_message_id: 'str' =None, reasoning_effort: 'str' =None, sent_at: 'datetime' =None, store: 'bool' =None, stream: 'bool' =None, system_prompt: 'str' =None, tools: 'list[V1Tool]' =None): # noqa: E501
83
+ def __init__(self, auto_name: 'bool' =None, billing_project_id: 'str' =None, conversation_id: 'str' =None, ephemeral: 'bool' =None, internal_conversation: 'bool' =None, max_completion_tokens: 'str' =None, message: 'V1Message' =None, metadata: 'dict(str, str)' =None, name: 'str' =None, parent_conversation_id: 'str' =None, parent_message_id: 'str' =None, reasoning_effort: 'str' =None, sent_at: 'datetime' =None, store: 'bool' =None, stream: 'bool' =None, system_prompt: 'str' =None, tools: 'list[V1Tool]' =None): # noqa: E501
84
84
  """AssistantIdConversationsBody - a model defined in Swagger""" # noqa: E501
85
85
  self._auto_name = None
86
86
  self._billing_project_id = None
87
87
  self._conversation_id = None
88
88
  self._ephemeral = None
89
89
  self._internal_conversation = None
90
- self._max_tokens = None
90
+ self._max_completion_tokens = None
91
91
  self._message = None
92
92
  self._metadata = None
93
93
  self._name = None
@@ -110,8 +110,8 @@ class AssistantIdConversationsBody(object):
110
110
  self.ephemeral = ephemeral
111
111
  if internal_conversation is not None:
112
112
  self.internal_conversation = internal_conversation
113
- if max_tokens is not None:
114
- self.max_tokens = max_tokens
113
+ if max_completion_tokens is not None:
114
+ self.max_completion_tokens = max_completion_tokens
115
115
  if message is not None:
116
116
  self.message = message
117
117
  if metadata is not None:
@@ -241,25 +241,25 @@ class AssistantIdConversationsBody(object):
241
241
  self._internal_conversation = internal_conversation
242
242
 
243
243
  @property
244
- def max_tokens(self) -> 'str':
245
- """Gets the max_tokens of this AssistantIdConversationsBody. # noqa: E501
244
+ def max_completion_tokens(self) -> 'str':
245
+ """Gets the max_completion_tokens of this AssistantIdConversationsBody. # noqa: E501
246
246
 
247
247
 
248
- :return: The max_tokens of this AssistantIdConversationsBody. # noqa: E501
248
+ :return: The max_completion_tokens of this AssistantIdConversationsBody. # noqa: E501
249
249
  :rtype: str
250
250
  """
251
- return self._max_tokens
251
+ return self._max_completion_tokens
252
252
 
253
- @max_tokens.setter
254
- def max_tokens(self, max_tokens: 'str'):
255
- """Sets the max_tokens of this AssistantIdConversationsBody.
253
+ @max_completion_tokens.setter
254
+ def max_completion_tokens(self, max_completion_tokens: 'str'):
255
+ """Sets the max_completion_tokens of this AssistantIdConversationsBody.
256
256
 
257
257
 
258
- :param max_tokens: The max_tokens of this AssistantIdConversationsBody. # noqa: E501
258
+ :param max_completion_tokens: The max_completion_tokens of this AssistantIdConversationsBody. # noqa: E501
259
259
  :type: str
260
260
  """
261
261
 
262
- self._max_tokens = max_tokens
262
+ self._max_completion_tokens = max_completion_tokens
263
263
 
264
264
  @property
265
265
  def message(self) -> 'V1Message':
@@ -43,6 +43,8 @@ class V1PodMetrics(object):
43
43
  swagger_types = {
44
44
  'cpu_usage': 'float',
45
45
  'labels': 'dict(str, str)',
46
+ 'max_gpu_temp_recorded': 'float',
47
+ 'max_power_per_gpu': 'float',
46
48
  'mem_usage': 'float',
47
49
  'namespace': 'str',
48
50
  'node_name': 'str',
@@ -50,6 +52,10 @@ class V1PodMetrics(object):
50
52
  'num_cpus_request': 'float',
51
53
  'num_gpus': 'float',
52
54
  'per_gpu_mem_used': 'dict(str, float)',
55
+ 'per_gpu_power_usage_watts': 'dict(str, float)',
56
+ 'per_gpu_sm_active': 'dict(str, float)',
57
+ 'per_gpu_sm_occupancy': 'dict(str, float)',
58
+ 'per_gpu_temperature_c': 'dict(str, float)',
53
59
  'per_gpu_util': 'dict(str, float)',
54
60
  'phase': 'str',
55
61
  'pod_id': 'str',
@@ -61,6 +67,8 @@ class V1PodMetrics(object):
61
67
  attribute_map = {
62
68
  'cpu_usage': 'cpuUsage',
63
69
  'labels': 'labels',
70
+ 'max_gpu_temp_recorded': 'maxGpuTempRecorded',
71
+ 'max_power_per_gpu': 'maxPowerPerGpu',
64
72
  'mem_usage': 'memUsage',
65
73
  'namespace': 'namespace',
66
74
  'node_name': 'nodeName',
@@ -68,6 +76,10 @@ class V1PodMetrics(object):
68
76
  'num_cpus_request': 'numCpusRequest',
69
77
  'num_gpus': 'numGpus',
70
78
  'per_gpu_mem_used': 'perGpuMemUsed',
79
+ 'per_gpu_power_usage_watts': 'perGpuPowerUsageWatts',
80
+ 'per_gpu_sm_active': 'perGpuSmActive',
81
+ 'per_gpu_sm_occupancy': 'perGpuSmOccupancy',
82
+ 'per_gpu_temperature_c': 'perGpuTemperatureC',
71
83
  'per_gpu_util': 'perGpuUtil',
72
84
  'phase': 'phase',
73
85
  'pod_id': 'podId',
@@ -76,10 +88,12 @@ class V1PodMetrics(object):
76
88
  'user_id': 'userId'
77
89
  }
78
90
 
79
- def __init__(self, cpu_usage: 'float' =None, labels: 'dict(str, str)' =None, mem_usage: 'float' =None, namespace: 'str' =None, node_name: 'str' =None, num_cpus_limit: 'float' =None, num_cpus_request: 'float' =None, num_gpus: 'float' =None, per_gpu_mem_used: 'dict(str, float)' =None, per_gpu_util: 'dict(str, float)' =None, phase: 'str' =None, pod_id: 'str' =None, pod_name: 'str' =None, timestamp: 'datetime' =None, user_id: 'str' =None): # noqa: E501
91
+ def __init__(self, cpu_usage: 'float' =None, labels: 'dict(str, str)' =None, max_gpu_temp_recorded: 'float' =None, max_power_per_gpu: 'float' =None, mem_usage: 'float' =None, namespace: 'str' =None, node_name: 'str' =None, num_cpus_limit: 'float' =None, num_cpus_request: 'float' =None, num_gpus: 'float' =None, per_gpu_mem_used: 'dict(str, float)' =None, per_gpu_power_usage_watts: 'dict(str, float)' =None, per_gpu_sm_active: 'dict(str, float)' =None, per_gpu_sm_occupancy: 'dict(str, float)' =None, per_gpu_temperature_c: 'dict(str, float)' =None, per_gpu_util: 'dict(str, float)' =None, phase: 'str' =None, pod_id: 'str' =None, pod_name: 'str' =None, timestamp: 'datetime' =None, user_id: 'str' =None): # noqa: E501
80
92
  """V1PodMetrics - a model defined in Swagger""" # noqa: E501
81
93
  self._cpu_usage = None
82
94
  self._labels = None
95
+ self._max_gpu_temp_recorded = None
96
+ self._max_power_per_gpu = None
83
97
  self._mem_usage = None
84
98
  self._namespace = None
85
99
  self._node_name = None
@@ -87,6 +101,10 @@ class V1PodMetrics(object):
87
101
  self._num_cpus_request = None
88
102
  self._num_gpus = None
89
103
  self._per_gpu_mem_used = None
104
+ self._per_gpu_power_usage_watts = None
105
+ self._per_gpu_sm_active = None
106
+ self._per_gpu_sm_occupancy = None
107
+ self._per_gpu_temperature_c = None
90
108
  self._per_gpu_util = None
91
109
  self._phase = None
92
110
  self._pod_id = None
@@ -98,6 +116,10 @@ class V1PodMetrics(object):
98
116
  self.cpu_usage = cpu_usage
99
117
  if labels is not None:
100
118
  self.labels = labels
119
+ if max_gpu_temp_recorded is not None:
120
+ self.max_gpu_temp_recorded = max_gpu_temp_recorded
121
+ if max_power_per_gpu is not None:
122
+ self.max_power_per_gpu = max_power_per_gpu
101
123
  if mem_usage is not None:
102
124
  self.mem_usage = mem_usage
103
125
  if namespace is not None:
@@ -112,6 +134,14 @@ class V1PodMetrics(object):
112
134
  self.num_gpus = num_gpus
113
135
  if per_gpu_mem_used is not None:
114
136
  self.per_gpu_mem_used = per_gpu_mem_used
137
+ if per_gpu_power_usage_watts is not None:
138
+ self.per_gpu_power_usage_watts = per_gpu_power_usage_watts
139
+ if per_gpu_sm_active is not None:
140
+ self.per_gpu_sm_active = per_gpu_sm_active
141
+ if per_gpu_sm_occupancy is not None:
142
+ self.per_gpu_sm_occupancy = per_gpu_sm_occupancy
143
+ if per_gpu_temperature_c is not None:
144
+ self.per_gpu_temperature_c = per_gpu_temperature_c
115
145
  if per_gpu_util is not None:
116
146
  self.per_gpu_util = per_gpu_util
117
147
  if phase is not None:
@@ -167,6 +197,48 @@ class V1PodMetrics(object):
167
197
 
168
198
  self._labels = labels
169
199
 
200
    @property
    def max_gpu_temp_recorded(self) -> 'float':
        """Gets the max_gpu_temp_recorded of this V1PodMetrics. # noqa: E501

        Peak GPU temperature recorded for the pod. Unit is not stated here;
        presumably Celsius, matching ``per_gpu_temperature_c`` — TODO confirm.

        :return: The max_gpu_temp_recorded of this V1PodMetrics. # noqa: E501
        :rtype: float
        """
        return self._max_gpu_temp_recorded

    @max_gpu_temp_recorded.setter
    def max_gpu_temp_recorded(self, max_gpu_temp_recorded: 'float'):
        """Sets the max_gpu_temp_recorded of this V1PodMetrics.

        :param max_gpu_temp_recorded: The max_gpu_temp_recorded of this V1PodMetrics. # noqa: E501
        :type: float
        """

        self._max_gpu_temp_recorded = max_gpu_temp_recorded
220
+
221
    @property
    def max_power_per_gpu(self) -> 'float':
        """Gets the max_power_per_gpu of this V1PodMetrics. # noqa: E501

        Maximum power draw per GPU. Unit is not stated here; presumably watts,
        matching ``per_gpu_power_usage_watts`` — TODO confirm.

        :return: The max_power_per_gpu of this V1PodMetrics. # noqa: E501
        :rtype: float
        """
        return self._max_power_per_gpu

    @max_power_per_gpu.setter
    def max_power_per_gpu(self, max_power_per_gpu: 'float'):
        """Sets the max_power_per_gpu of this V1PodMetrics.

        :param max_power_per_gpu: The max_power_per_gpu of this V1PodMetrics. # noqa: E501
        :type: float
        """

        self._max_power_per_gpu = max_power_per_gpu
241
+
170
242
  @property
171
243
  def mem_usage(self) -> 'float':
172
244
  """Gets the mem_usage of this V1PodMetrics. # noqa: E501
@@ -314,6 +386,90 @@ class V1PodMetrics(object):
314
386
 
315
387
  self._per_gpu_mem_used = per_gpu_mem_used
316
388
 
389
    @property
    def per_gpu_power_usage_watts(self) -> 'dict(str, float)':
        """Gets the per_gpu_power_usage_watts of this V1PodMetrics. # noqa: E501

        Power usage in watts per GPU; keys presumably identify individual
        GPUs (index or ID) — TODO confirm against the metrics producer.

        :return: The per_gpu_power_usage_watts of this V1PodMetrics. # noqa: E501
        :rtype: dict(str, float)
        """
        return self._per_gpu_power_usage_watts

    @per_gpu_power_usage_watts.setter
    def per_gpu_power_usage_watts(self, per_gpu_power_usage_watts: 'dict(str, float)'):
        """Sets the per_gpu_power_usage_watts of this V1PodMetrics.

        :param per_gpu_power_usage_watts: The per_gpu_power_usage_watts of this V1PodMetrics. # noqa: E501
        :type: dict(str, float)
        """

        self._per_gpu_power_usage_watts = per_gpu_power_usage_watts
409
+
410
    @property
    def per_gpu_sm_active(self) -> 'dict(str, float)':
        """Gets the per_gpu_sm_active of this V1PodMetrics. # noqa: E501

        Per-GPU "SM active" metric; "SM" presumably refers to GPU streaming
        multiprocessors and the value to an activity fraction — TODO confirm.

        :return: The per_gpu_sm_active of this V1PodMetrics. # noqa: E501
        :rtype: dict(str, float)
        """
        return self._per_gpu_sm_active

    @per_gpu_sm_active.setter
    def per_gpu_sm_active(self, per_gpu_sm_active: 'dict(str, float)'):
        """Sets the per_gpu_sm_active of this V1PodMetrics.

        :param per_gpu_sm_active: The per_gpu_sm_active of this V1PodMetrics. # noqa: E501
        :type: dict(str, float)
        """

        self._per_gpu_sm_active = per_gpu_sm_active
430
+
431
    @property
    def per_gpu_sm_occupancy(self) -> 'dict(str, float)':
        """Gets the per_gpu_sm_occupancy of this V1PodMetrics. # noqa: E501

        Per-GPU "SM occupancy" metric; "SM" presumably refers to GPU streaming
        multiprocessors and the value to an occupancy fraction — TODO confirm.

        :return: The per_gpu_sm_occupancy of this V1PodMetrics. # noqa: E501
        :rtype: dict(str, float)
        """
        return self._per_gpu_sm_occupancy

    @per_gpu_sm_occupancy.setter
    def per_gpu_sm_occupancy(self, per_gpu_sm_occupancy: 'dict(str, float)'):
        """Sets the per_gpu_sm_occupancy of this V1PodMetrics.

        :param per_gpu_sm_occupancy: The per_gpu_sm_occupancy of this V1PodMetrics. # noqa: E501
        :type: dict(str, float)
        """

        self._per_gpu_sm_occupancy = per_gpu_sm_occupancy
451
+
452
    @property
    def per_gpu_temperature_c(self) -> 'dict(str, float)':
        """Gets the per_gpu_temperature_c of this V1PodMetrics. # noqa: E501

        Per-GPU temperature in degrees Celsius (per the field name); keys
        presumably identify individual GPUs (index or ID) — TODO confirm.

        :return: The per_gpu_temperature_c of this V1PodMetrics. # noqa: E501
        :rtype: dict(str, float)
        """
        return self._per_gpu_temperature_c

    @per_gpu_temperature_c.setter
    def per_gpu_temperature_c(self, per_gpu_temperature_c: 'dict(str, float)'):
        """Sets the per_gpu_temperature_c of this V1PodMetrics.

        :param per_gpu_temperature_c: The per_gpu_temperature_c of this V1PodMetrics. # noqa: E501
        :type: dict(str, float)
        """

        self._per_gpu_temperature_c = per_gpu_temperature_c
472
+
317
473
  @property
318
474
  def per_gpu_util(self) -> 'dict(str, float)':
319
475
  """Gets the per_gpu_util of this V1PodMetrics. # noqa: E501