pulumi-oci 2.1.0a1719958917__py3-none-any.whl → 2.1.0a1720054142__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pulumi_oci/__init__.py +43 -0
- pulumi_oci/database/__init__.py +11 -0
- pulumi_oci/database/_inputs.py +607 -0
- pulumi_oci/database/db_node.py +28 -0
- pulumi_oci/database/exadb_vm_cluster.py +1761 -0
- pulumi_oci/database/exascale_db_storage_vault.py +787 -0
- pulumi_oci/database/get_backups.py +22 -5
- pulumi_oci/database/get_db_node.py +14 -1
- pulumi_oci/database/get_db_nodes.py +2 -2
- pulumi_oci/database/get_exadb_vm_cluster.py +614 -0
- pulumi_oci/database/get_exadb_vm_cluster_update.py +226 -0
- pulumi_oci/database/get_exadb_vm_cluster_update_history_entries.py +153 -0
- pulumi_oci/database/get_exadb_vm_cluster_update_history_entry.py +226 -0
- pulumi_oci/database/get_exadb_vm_cluster_updates.py +173 -0
- pulumi_oci/database/get_exadb_vm_clusters.py +196 -0
- pulumi_oci/database/get_exascale_db_storage_vault.py +301 -0
- pulumi_oci/database/get_exascale_db_storage_vaults.py +176 -0
- pulumi_oci/database/get_gi_version_minor_versions.py +221 -0
- pulumi_oci/database/get_gi_versions.py +22 -5
- pulumi_oci/database/outputs.py +2050 -0
- pulumi_oci/database/pluggable_database.py +7 -7
- pulumi_oci/databasemigration/__init__.py +6 -0
- pulumi_oci/databasemigration/_inputs.py +1577 -0
- pulumi_oci/databasemigration/connection.py +2019 -0
- pulumi_oci/databasemigration/get_connection.py +616 -0
- pulumi_oci/databasemigration/get_connections.py +225 -0
- pulumi_oci/databasemigration/get_job_advisor_report.py +2 -10
- pulumi_oci/databasemigration/get_migration.py +427 -0
- pulumi_oci/databasemigration/get_migration_object_types.py +24 -13
- pulumi_oci/databasemigration/get_migrations.py +407 -0
- pulumi_oci/databasemigration/job.py +16 -20
- pulumi_oci/databasemigration/migration.py +1471 -0
- pulumi_oci/databasemigration/outputs.py +4301 -73
- pulumi_oci/filestorage/_inputs.py +10 -18
- pulumi_oci/filestorage/export.py +28 -7
- pulumi_oci/filestorage/file_system.py +159 -35
- pulumi_oci/filestorage/outputs.py +55 -34
- pulumi_oci/generativeai/_inputs.py +50 -2
- pulumi_oci/generativeai/dedicated_ai_cluster.py +30 -2
- pulumi_oci/generativeai/endpoint.py +2 -2
- pulumi_oci/generativeai/get_dedicated_ai_cluster.py +2 -47
- pulumi_oci/generativeai/get_dedicated_ai_clusters.py +2 -14
- pulumi_oci/generativeai/get_endpoint.py +2 -26
- pulumi_oci/generativeai/get_endpoints.py +2 -8
- pulumi_oci/generativeai/get_model.py +2 -38
- pulumi_oci/generativeai/get_models.py +2 -8
- pulumi_oci/generativeai/model.py +2 -2
- pulumi_oci/generativeai/outputs.py +86 -310
- pulumi_oci/pulumi-plugin.json +1 -1
- pulumi_oci/resourcescheduler/__init__.py +12 -0
- pulumi_oci/resourcescheduler/_inputs.py +224 -0
- pulumi_oci/resourcescheduler/get_schedule.py +340 -0
- pulumi_oci/resourcescheduler/get_schedules.py +193 -0
- pulumi_oci/resourcescheduler/outputs.py +687 -0
- pulumi_oci/resourcescheduler/schedule.py +977 -0
- {pulumi_oci-2.1.0a1719958917.dist-info → pulumi_oci-2.1.0a1720054142.dist-info}/METADATA +1 -1
- {pulumi_oci-2.1.0a1719958917.dist-info → pulumi_oci-2.1.0a1720054142.dist-info}/RECORD +59 -36
- {pulumi_oci-2.1.0a1719958917.dist-info → pulumi_oci-2.1.0a1720054142.dist-info}/WHEEL +0 -0
- {pulumi_oci-2.1.0a1719958917.dist-info → pulumi_oci-2.1.0a1720054142.dist-info}/top_level.txt +0 -0
pulumi_oci/generativeai/outputs.py

```diff
@@ -170,7 +170,7 @@ class ModelFineTuneDetails(dict):
         :param str dedicated_ai_cluster_id: The OCID of the dedicated AI cluster this fine-tuning runs on.
         :param 'ModelFineTuneDetailsTrainingDatasetArgs' training_dataset: The dataset used to fine-tune the model.

-               Only one dataset is allowed per custom model, which is split
+               Only one dataset is allowed per custom model, which is split 80-20 for training and validating. You must provide the dataset in a JSON Lines (JSONL) file. Each line in the JSONL file must have the format: `{"prompt": "<first prompt>", "completion": "<expected completion given first prompt>"}`
         :param 'ModelFineTuneDetailsTrainingConfigArgs' training_config: The fine-tuning method and hyperparameters used for fine-tuning a custom model.
         """
         pulumi.set(__self__, "dedicated_ai_cluster_id", dedicated_ai_cluster_id)
@@ -192,7 +192,7 @@ class ModelFineTuneDetails(dict):
         """
         The dataset used to fine-tune the model.

-        Only one dataset is allowed per custom model, which is split
+        Only one dataset is allowed per custom model, which is split 80-20 for training and validating. You must provide the dataset in a JSON Lines (JSONL) file. Each line in the JSONL file must have the format: `{"prompt": "<first prompt>", "completion": "<expected completion given first prompt>"}`
         """
         return pulumi.get(self, "training_dataset")

```
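The expanded docstrings above spell out the required training-data format: plain JSON Lines, one prompt/completion pair per line. As a minimal illustration (the file name and sample records are placeholders, not taken from the package), writing such a file in Python might look like:

```python
import json

# Illustrative only: two records in the prompt/completion JSONL format that the
# updated docstring describes. File name and sample text are placeholders.
records = [
    {"prompt": "Summarize: dedicated AI clusters host fine-tuning workloads.",
     "completion": "Dedicated AI clusters provide capacity for fine-tuning and hosting models."},
    {"prompt": "Classify the sentiment: the deployment finished without errors.",
     "completion": "positive"},
]

with open("training_data.jsonl", "w") as handle:
    for record in records:
        handle.write(json.dumps(record) + "\n")
```

Each line must parse as a standalone JSON object with exactly the `prompt` and `completion` keys described in the docstring.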
```diff
@@ -220,6 +220,12 @@ class ModelFineTuneDetailsTrainingConfig(dict):
             suggest = "learning_rate"
         elif key == "logModelMetricsIntervalInSteps":
             suggest = "log_model_metrics_interval_in_steps"
+        elif key == "loraAlpha":
+            suggest = "lora_alpha"
+        elif key == "loraDropout":
+            suggest = "lora_dropout"
+        elif key == "loraR":
+            suggest = "lora_r"
         elif key == "numOfLastLayers":
             suggest = "num_of_last_layers"
         elif key == "totalTrainingEpochs":
@@ -244,6 +250,9 @@ class ModelFineTuneDetailsTrainingConfig(dict):
                  early_stopping_threshold: Optional[float] = None,
                  learning_rate: Optional[float] = None,
                  log_model_metrics_interval_in_steps: Optional[int] = None,
+                 lora_alpha: Optional[int] = None,
+                 lora_dropout: Optional[float] = None,
+                 lora_r: Optional[int] = None,
                  num_of_last_layers: Optional[int] = None,
                  total_training_epochs: Optional[int] = None,
                  training_batch_size: Optional[int] = None):
@@ -255,6 +264,9 @@ class ModelFineTuneDetailsTrainingConfig(dict):
         :param int log_model_metrics_interval_in_steps: Determines how frequently to log model metrics.

               Every step is logged for the first 20 steps and then follows this parameter for log frequency. Set to 0 to disable logging the model metrics.
+        :param int lora_alpha: This parameter represents the scaling factor for the weight matrices in LoRA.
+        :param float lora_dropout: This parameter indicates the dropout probability for LoRA layers.
+        :param int lora_r: This parameter represents the LoRA rank of the update matrices.
         :param int num_of_last_layers: The number of last layers to be fine-tuned.
         :param int total_training_epochs: The maximum number of training epochs to run for.
         :param int training_batch_size: The batch size used during training.
@@ -268,6 +280,12 @@ class ModelFineTuneDetailsTrainingConfig(dict):
             pulumi.set(__self__, "learning_rate", learning_rate)
         if log_model_metrics_interval_in_steps is not None:
             pulumi.set(__self__, "log_model_metrics_interval_in_steps", log_model_metrics_interval_in_steps)
+        if lora_alpha is not None:
+            pulumi.set(__self__, "lora_alpha", lora_alpha)
+        if lora_dropout is not None:
+            pulumi.set(__self__, "lora_dropout", lora_dropout)
+        if lora_r is not None:
+            pulumi.set(__self__, "lora_r", lora_r)
         if num_of_last_layers is not None:
             pulumi.set(__self__, "num_of_last_layers", num_of_last_layers)
         if total_training_epochs is not None:
@@ -317,6 +335,30 @@ class ModelFineTuneDetailsTrainingConfig(dict):
         """
         return pulumi.get(self, "log_model_metrics_interval_in_steps")

+    @property
+    @pulumi.getter(name="loraAlpha")
+    def lora_alpha(self) -> Optional[int]:
+        """
+        This parameter represents the scaling factor for the weight matrices in LoRA.
+        """
+        return pulumi.get(self, "lora_alpha")
+
+    @property
+    @pulumi.getter(name="loraDropout")
+    def lora_dropout(self) -> Optional[float]:
+        """
+        This parameter indicates the dropout probability for LoRA layers.
+        """
+        return pulumi.get(self, "lora_dropout")
+
+    @property
+    @pulumi.getter(name="loraR")
+    def lora_r(self) -> Optional[int]:
+        """
+        This parameter represents the LoRA rank of the update matrices.
+        """
+        return pulumi.get(self, "lora_r")
+
     @property
     @pulumi.getter(name="numOfLastLayers")
     def num_of_last_layers(self) -> Optional[int]:
```
```diff
@@ -480,11 +522,6 @@ class GetDedicatedAiClusterCapacityResult(dict):
                  capacity_type: str,
                  total_endpoint_capacity: int,
                  used_endpoint_capacity: int):
-        """
-        :param str capacity_type: The type of the dedicated AI cluster capacity.
-        :param int total_endpoint_capacity: The total number of endpoints that can be hosted on this dedicated AI cluster.
-        :param int used_endpoint_capacity: The number of endpoints hosted on this dedicated AI cluster.
-        """
         pulumi.set(__self__, "capacity_type", capacity_type)
         pulumi.set(__self__, "total_endpoint_capacity", total_endpoint_capacity)
         pulumi.set(__self__, "used_endpoint_capacity", used_endpoint_capacity)
@@ -492,25 +529,16 @@ class GetDedicatedAiClusterCapacityResult(dict):
     @property
     @pulumi.getter(name="capacityType")
     def capacity_type(self) -> str:
-        """
-        The type of the dedicated AI cluster capacity.
-        """
         return pulumi.get(self, "capacity_type")

     @property
     @pulumi.getter(name="totalEndpointCapacity")
     def total_endpoint_capacity(self) -> int:
-        """
-        The total number of endpoints that can be hosted on this dedicated AI cluster.
-        """
         return pulumi.get(self, "total_endpoint_capacity")

     @property
     @pulumi.getter(name="usedEndpointCapacity")
     def used_endpoint_capacity(self) -> int:
-        """
-        The number of endpoints hosted on this dedicated AI cluster.
-        """
         return pulumi.get(self, "used_endpoint_capacity")


@@ -545,21 +573,10 @@ class GetDedicatedAiClustersDedicatedAiClusterCollectionItemResult(dict):
                  unit_count: int,
                  unit_shape: str):
         """
-        :param Sequence['GetDedicatedAiClustersDedicatedAiClusterCollectionItemCapacityArgs'] capacities: The total capacity for a dedicated AI cluster.
         :param str compartment_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which to list resources.
-        :param Mapping[str, Any] defined_tags: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
-        :param str description: An optional description of the dedicated AI cluster.
         :param str display_name: A filter to return only resources that match the given display name exactly.
-        :param Mapping[str, Any] freeform_tags: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
         :param str id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the dedicated AI cluster.
-        :param str lifecycle_details: A message describing the current state with detail that can provide actionable information.
         :param str state: A filter to return only the dedicated AI clusters that their lifecycle state matches the given lifecycle state.
-        :param Mapping[str, Any] system_tags: System tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"orcl-cloud.free-tier-retained": "true"}`
-        :param str time_created: The date and time the dedicated AI cluster was created, in the format defined by RFC 3339
-        :param str time_updated: The date and time the dedicated AI cluster was updated, in the format defined by RFC 3339
-        :param str type: The dedicated AI cluster type indicating whether this is a fine-tuning/training processor or hosting/inference processor.
-        :param int unit_count: The number of dedicated units in this AI cluster.
-        :param str unit_shape: The shape of dedicated unit in this AI cluster. The underlying hardware configuration is hidden from customers.
         """
         pulumi.set(__self__, "capacities", capacities)
         pulumi.set(__self__, "compartment_id", compartment_id)
@@ -580,9 +597,6 @@ class GetDedicatedAiClustersDedicatedAiClusterCollectionItemResult(dict):
     @property
     @pulumi.getter
     def capacities(self) -> Sequence['outputs.GetDedicatedAiClustersDedicatedAiClusterCollectionItemCapacityResult']:
-        """
-        The total capacity for a dedicated AI cluster.
-        """
         return pulumi.get(self, "capacities")

     @property
@@ -596,17 +610,11 @@ class GetDedicatedAiClustersDedicatedAiClusterCollectionItemResult(dict):
     @property
     @pulumi.getter(name="definedTags")
     def defined_tags(self) -> Mapping[str, Any]:
-        """
-        Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
-        """
         return pulumi.get(self, "defined_tags")

     @property
     @pulumi.getter
     def description(self) -> str:
-        """
-        An optional description of the dedicated AI cluster.
-        """
         return pulumi.get(self, "description")

     @property
@@ -620,9 +628,6 @@ class GetDedicatedAiClustersDedicatedAiClusterCollectionItemResult(dict):
     @property
     @pulumi.getter(name="freeformTags")
     def freeform_tags(self) -> Mapping[str, Any]:
-        """
-        Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
-        """
         return pulumi.get(self, "freeform_tags")

     @property
@@ -636,9 +641,6 @@ class GetDedicatedAiClustersDedicatedAiClusterCollectionItemResult(dict):
     @property
     @pulumi.getter(name="lifecycleDetails")
     def lifecycle_details(self) -> str:
-        """
-        A message describing the current state with detail that can provide actionable information.
-        """
         return pulumi.get(self, "lifecycle_details")

     @property
@@ -652,49 +654,31 @@ class GetDedicatedAiClustersDedicatedAiClusterCollectionItemResult(dict):
     @property
     @pulumi.getter(name="systemTags")
     def system_tags(self) -> Mapping[str, Any]:
-        """
-        System tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"orcl-cloud.free-tier-retained": "true"}`
-        """
         return pulumi.get(self, "system_tags")

     @property
     @pulumi.getter(name="timeCreated")
     def time_created(self) -> str:
-        """
-        The date and time the dedicated AI cluster was created, in the format defined by RFC 3339
-        """
         return pulumi.get(self, "time_created")

     @property
     @pulumi.getter(name="timeUpdated")
     def time_updated(self) -> str:
-        """
-        The date and time the dedicated AI cluster was updated, in the format defined by RFC 3339
-        """
         return pulumi.get(self, "time_updated")

     @property
     @pulumi.getter
     def type(self) -> str:
-        """
-        The dedicated AI cluster type indicating whether this is a fine-tuning/training processor or hosting/inference processor.
-        """
         return pulumi.get(self, "type")

     @property
     @pulumi.getter(name="unitCount")
     def unit_count(self) -> int:
-        """
-        The number of dedicated units in this AI cluster.
-        """
         return pulumi.get(self, "unit_count")

     @property
     @pulumi.getter(name="unitShape")
     def unit_shape(self) -> str:
-        """
-        The shape of dedicated unit in this AI cluster. The underlying hardware configuration is hidden from customers.
-        """
         return pulumi.get(self, "unit_shape")


@@ -704,11 +688,6 @@ class GetDedicatedAiClustersDedicatedAiClusterCollectionItemCapacityResult(dict)
                  capacity_type: str,
                  total_endpoint_capacity: int,
                  used_endpoint_capacity: int):
-        """
-        :param str capacity_type: The type of the dedicated AI cluster capacity.
-        :param int total_endpoint_capacity: The total number of endpoints that can be hosted on this dedicated AI cluster.
-        :param int used_endpoint_capacity: The number of endpoints hosted on this dedicated AI cluster.
-        """
         pulumi.set(__self__, "capacity_type", capacity_type)
         pulumi.set(__self__, "total_endpoint_capacity", total_endpoint_capacity)
         pulumi.set(__self__, "used_endpoint_capacity", used_endpoint_capacity)
@@ -716,25 +695,16 @@ class GetDedicatedAiClustersDedicatedAiClusterCollectionItemCapacityResult(dict)
     @property
     @pulumi.getter(name="capacityType")
     def capacity_type(self) -> str:
-        """
-        The type of the dedicated AI cluster capacity.
-        """
         return pulumi.get(self, "capacity_type")

     @property
     @pulumi.getter(name="totalEndpointCapacity")
     def total_endpoint_capacity(self) -> int:
-        """
-        The total number of endpoints that can be hosted on this dedicated AI cluster.
-        """
         return pulumi.get(self, "total_endpoint_capacity")

     @property
     @pulumi.getter(name="usedEndpointCapacity")
     def used_endpoint_capacity(self) -> int:
-        """
-        The number of endpoints hosted on this dedicated AI cluster.
-        """
         return pulumi.get(self, "used_endpoint_capacity")


@@ -769,17 +739,11 @@ class GetDedicatedAiClustersFilterResult(dict):
 class GetEndpointContentModerationConfigResult(dict):
     def __init__(__self__, *,
                  is_enabled: bool):
-        """
-        :param bool is_enabled: Whether to enable the content moderation feature.
-        """
         pulumi.set(__self__, "is_enabled", is_enabled)

     @property
     @pulumi.getter(name="isEnabled")
     def is_enabled(self) -> bool:
-        """
-        Whether to enable the content moderation feature.
-        """
         return pulumi.get(self, "is_enabled")


@@ -814,17 +778,11 @@ class GetEndpointsEndpointCollectionItemResult(dict):
                  time_updated: str):
         """
         :param str compartment_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which to list resources.
-        :param Sequence['GetEndpointsEndpointCollectionItemContentModerationConfigArgs'] content_moderation_configs: The configuration details, whether to add the content moderation feature to the model. Content moderation removes toxic and biased content from responses. It's recommended to use content moderation.
-        :param str dedicated_ai_cluster_id: The OCID of the dedicated AI cluster on which the model will be deployed to.
         :param Mapping[str, Any] defined_tags: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
-        :param str description: An optional description of the endpoint.
         :param str display_name: A filter to return only resources that match the given display name exactly.
-        :param Mapping[str, Any] freeform_tags: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
         :param str id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the endpoint.
-        :param str lifecycle_details: A message describing the current state of the endpoint in more detail that can provide actionable information.
         :param str model_id: The OCID of the model that's used to create this endpoint.
         :param str state: A filter to return only resources that their lifecycle state matches the given lifecycle state.
-        :param Mapping[str, Any] system_tags: System tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"orcl-cloud.free-tier-retained": "true"}`
         :param str time_created: The date and time that the endpoint was created in the format of an RFC3339 datetime string.
         :param str time_updated: The date and time that the endpoint was updated in the format of an RFC3339 datetime string.
         """
@@ -854,17 +812,11 @@ class GetEndpointsEndpointCollectionItemResult(dict):
     @property
     @pulumi.getter(name="contentModerationConfigs")
     def content_moderation_configs(self) -> Sequence['outputs.GetEndpointsEndpointCollectionItemContentModerationConfigResult']:
-        """
-        The configuration details, whether to add the content moderation feature to the model. Content moderation removes toxic and biased content from responses. It's recommended to use content moderation.
-        """
         return pulumi.get(self, "content_moderation_configs")

     @property
     @pulumi.getter(name="dedicatedAiClusterId")
     def dedicated_ai_cluster_id(self) -> str:
-        """
-        The OCID of the dedicated AI cluster on which the model will be deployed to.
-        """
         return pulumi.get(self, "dedicated_ai_cluster_id")

     @property
@@ -878,9 +830,6 @@ class GetEndpointsEndpointCollectionItemResult(dict):
     @property
     @pulumi.getter
     def description(self) -> str:
-        """
-        An optional description of the endpoint.
-        """
         return pulumi.get(self, "description")

     @property
@@ -894,9 +843,6 @@ class GetEndpointsEndpointCollectionItemResult(dict):
     @property
     @pulumi.getter(name="freeformTags")
     def freeform_tags(self) -> Mapping[str, Any]:
-        """
-        Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
-        """
         return pulumi.get(self, "freeform_tags")

     @property
@@ -910,9 +856,6 @@ class GetEndpointsEndpointCollectionItemResult(dict):
     @property
     @pulumi.getter(name="lifecycleDetails")
     def lifecycle_details(self) -> str:
-        """
-        A message describing the current state of the endpoint in more detail that can provide actionable information.
-        """
         return pulumi.get(self, "lifecycle_details")

     @property
@@ -934,9 +877,6 @@ class GetEndpointsEndpointCollectionItemResult(dict):
     @property
     @pulumi.getter(name="systemTags")
     def system_tags(self) -> Mapping[str, Any]:
-        """
-        System tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"orcl-cloud.free-tier-retained": "true"}`
-        """
         return pulumi.get(self, "system_tags")

     @property
@@ -960,17 +900,11 @@ class GetEndpointsEndpointCollectionItemResult(dict):
 class GetEndpointsEndpointCollectionItemContentModerationConfigResult(dict):
     def __init__(__self__, *,
                  is_enabled: bool):
-        """
-        :param bool is_enabled: Whether to enable the content moderation feature.
-        """
         pulumi.set(__self__, "is_enabled", is_enabled)

     @property
     @pulumi.getter(name="isEnabled")
     def is_enabled(self) -> bool:
-        """
-        Whether to enable the content moderation feature.
-        """
         return pulumi.get(self, "is_enabled")


@@ -1007,11 +941,6 @@ class GetModelFineTuneDetailResult(dict):
                  dedicated_ai_cluster_id: str,
                  training_configs: Sequence['outputs.GetModelFineTuneDetailTrainingConfigResult'],
                  training_datasets: Sequence['outputs.GetModelFineTuneDetailTrainingDatasetResult']):
-        """
-        :param str dedicated_ai_cluster_id: The OCID of the dedicated AI cluster this fine-tuning runs on.
-        :param Sequence['GetModelFineTuneDetailTrainingConfigArgs'] training_configs: The fine-tuning method and hyperparameters used for fine-tuning a custom model.
-        :param Sequence['GetModelFineTuneDetailTrainingDatasetArgs'] training_datasets: The dataset used to fine-tune the model.
-        """
         pulumi.set(__self__, "dedicated_ai_cluster_id", dedicated_ai_cluster_id)
         pulumi.set(__self__, "training_configs", training_configs)
         pulumi.set(__self__, "training_datasets", training_datasets)
@@ -1019,25 +948,16 @@ class GetModelFineTuneDetailResult(dict):
     @property
     @pulumi.getter(name="dedicatedAiClusterId")
     def dedicated_ai_cluster_id(self) -> str:
-        """
-        The OCID of the dedicated AI cluster this fine-tuning runs on.
-        """
         return pulumi.get(self, "dedicated_ai_cluster_id")

     @property
     @pulumi.getter(name="trainingConfigs")
     def training_configs(self) -> Sequence['outputs.GetModelFineTuneDetailTrainingConfigResult']:
-        """
-        The fine-tuning method and hyperparameters used for fine-tuning a custom model.
-        """
         return pulumi.get(self, "training_configs")

     @property
     @pulumi.getter(name="trainingDatasets")
     def training_datasets(self) -> Sequence['outputs.GetModelFineTuneDetailTrainingDatasetResult']:
-        """
-        The dataset used to fine-tune the model.
-        """
         return pulumi.get(self, "training_datasets")


@@ -1048,24 +968,20 @@ class GetModelFineTuneDetailTrainingConfigResult(dict):
                  early_stopping_threshold: float,
                  learning_rate: float,
                  log_model_metrics_interval_in_steps: int,
+                 lora_alpha: int,
+                 lora_dropout: float,
+                 lora_r: int,
                  num_of_last_layers: int,
                  total_training_epochs: int,
                  training_batch_size: int,
                  training_config_type: str):
-        """
-        :param int early_stopping_patience: Stop training if the loss metric does not improve beyond 'early_stopping_threshold' for this many times of evaluation.
-        :param float early_stopping_threshold: How much the loss must improve to prevent early stopping.
-        :param float learning_rate: The initial learning rate to be used during training
-        :param int log_model_metrics_interval_in_steps: Determines how frequently to log model metrics.
-        :param int num_of_last_layers: The number of last layers to be fine-tuned.
-        :param int total_training_epochs: The maximum number of training epochs to run for.
-        :param int training_batch_size: The batch size used during training.
-        :param str training_config_type: The fine-tuning method for training a custom model.
-        """
         pulumi.set(__self__, "early_stopping_patience", early_stopping_patience)
         pulumi.set(__self__, "early_stopping_threshold", early_stopping_threshold)
         pulumi.set(__self__, "learning_rate", learning_rate)
         pulumi.set(__self__, "log_model_metrics_interval_in_steps", log_model_metrics_interval_in_steps)
+        pulumi.set(__self__, "lora_alpha", lora_alpha)
+        pulumi.set(__self__, "lora_dropout", lora_dropout)
+        pulumi.set(__self__, "lora_r", lora_r)
         pulumi.set(__self__, "num_of_last_layers", num_of_last_layers)
         pulumi.set(__self__, "total_training_epochs", total_training_epochs)
         pulumi.set(__self__, "training_batch_size", training_batch_size)
@@ -1074,65 +990,56 @@ class GetModelFineTuneDetailTrainingConfigResult(dict):
     @property
     @pulumi.getter(name="earlyStoppingPatience")
     def early_stopping_patience(self) -> int:
-        """
-        Stop training if the loss metric does not improve beyond 'early_stopping_threshold' for this many times of evaluation.
-        """
         return pulumi.get(self, "early_stopping_patience")

     @property
     @pulumi.getter(name="earlyStoppingThreshold")
     def early_stopping_threshold(self) -> float:
-        """
-        How much the loss must improve to prevent early stopping.
-        """
         return pulumi.get(self, "early_stopping_threshold")

     @property
     @pulumi.getter(name="learningRate")
     def learning_rate(self) -> float:
-        """
-        The initial learning rate to be used during training
-        """
         return pulumi.get(self, "learning_rate")

     @property
     @pulumi.getter(name="logModelMetricsIntervalInSteps")
     def log_model_metrics_interval_in_steps(self) -> int:
-        """
-        Determines how frequently to log model metrics.
-        """
         return pulumi.get(self, "log_model_metrics_interval_in_steps")

+    @property
+    @pulumi.getter(name="loraAlpha")
+    def lora_alpha(self) -> int:
+        return pulumi.get(self, "lora_alpha")
+
+    @property
+    @pulumi.getter(name="loraDropout")
+    def lora_dropout(self) -> float:
+        return pulumi.get(self, "lora_dropout")
+
+    @property
+    @pulumi.getter(name="loraR")
+    def lora_r(self) -> int:
+        return pulumi.get(self, "lora_r")
+
     @property
     @pulumi.getter(name="numOfLastLayers")
     def num_of_last_layers(self) -> int:
-        """
-        The number of last layers to be fine-tuned.
-        """
         return pulumi.get(self, "num_of_last_layers")

     @property
     @pulumi.getter(name="totalTrainingEpochs")
     def total_training_epochs(self) -> int:
-        """
-        The maximum number of training epochs to run for.
-        """
         return pulumi.get(self, "total_training_epochs")

     @property
     @pulumi.getter(name="trainingBatchSize")
     def training_batch_size(self) -> int:
-        """
-        The batch size used during training.
-        """
         return pulumi.get(self, "training_batch_size")

     @property
     @pulumi.getter(name="trainingConfigType")
     def training_config_type(self) -> str:
-        """
-        The fine-tuning method for training a custom model.
-        """
         return pulumi.get(self, "training_config_type")


@@ -1143,12 +1050,6 @@ class GetModelFineTuneDetailTrainingDatasetResult(dict):
                  dataset_type: str,
                  namespace: str,
                  object: str):
-        """
-        :param str bucket: The Object Storage bucket name.
-        :param str dataset_type: The type of the data asset.
-        :param str namespace: The Object Storage namespace.
-        :param str object: The Object Storage object name.
-        """
         pulumi.set(__self__, "bucket", bucket)
         pulumi.set(__self__, "dataset_type", dataset_type)
         pulumi.set(__self__, "namespace", namespace)
@@ -1157,33 +1058,21 @@ class GetModelFineTuneDetailTrainingDatasetResult(dict):
     @property
     @pulumi.getter
     def bucket(self) -> str:
-        """
-        The Object Storage bucket name.
-        """
         return pulumi.get(self, "bucket")

     @property
     @pulumi.getter(name="datasetType")
     def dataset_type(self) -> str:
-        """
-        The type of the data asset.
-        """
         return pulumi.get(self, "dataset_type")

     @property
     @pulumi.getter
     def namespace(self) -> str:
-        """
-        The Object Storage namespace.
-        """
         return pulumi.get(self, "namespace")

     @property
     @pulumi.getter
     def object(self) -> str:
-        """
-        The Object Storage object name.
-        """
         return pulumi.get(self, "object")


@@ -1193,11 +1082,6 @@ class GetModelModelMetricResult(dict):
                  final_accuracy: float,
                  final_loss: float,
                  model_metrics_type: str):
-        """
-        :param float final_accuracy: Fine-tuned model accuracy.
-        :param float final_loss: Fine-tuned model loss.
-        :param str model_metrics_type: The type of the model metrics. Each type of model can expect a different set of model metrics.
-        """
         pulumi.set(__self__, "final_accuracy", final_accuracy)
         pulumi.set(__self__, "final_loss", final_loss)
         pulumi.set(__self__, "model_metrics_type", model_metrics_type)
@@ -1205,25 +1089,16 @@ class GetModelModelMetricResult(dict):
     @property
     @pulumi.getter(name="finalAccuracy")
     def final_accuracy(self) -> float:
-        """
-        Fine-tuned model accuracy.
-        """
         return pulumi.get(self, "final_accuracy")

     @property
     @pulumi.getter(name="finalLoss")
     def final_loss(self) -> float:
-        """
-        Fine-tuned model loss.
-        """
         return pulumi.get(self, "final_loss")

     @property
     @pulumi.getter(name="modelMetricsType")
     def model_metrics_type(self) -> str:
-        """
-        The type of the model metrics. Each type of model can expect a different set of model metrics.
-        """
         return pulumi.get(self, "model_metrics_type")


@@ -1290,23 +1165,13 @@ class GetModelsModelCollectionItemResult(dict):
                  vendor: str,
                  version: str):
         """
-        :param str base_model_id: The OCID of the base model that's used for fine-tuning. For pretrained models, the value is null.
         :param Sequence[str] capabilities: Describes what this model can be used for.
         :param str compartment_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which to list resources.
         :param Mapping[str, Any] defined_tags: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
-        :param str description: An optional description of the model.
         :param str display_name: A filter to return only resources that match the given display name exactly.
-        :param Sequence['GetModelsModelCollectionItemFineTuneDetailArgs'] fine_tune_details: Details about fine-tuning a custom model.
-        :param Mapping[str, Any] freeform_tags: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
         :param str id: The ID of the model.
-        :param bool is_long_term_supported: Whether a model is supported long-term. Only applicable to base models.
-        :param str lifecycle_details: A message describing the current state of the model in more detail that can provide actionable information.
-        :param Sequence['GetModelsModelCollectionItemModelMetricArgs'] model_metrics: Model metrics during the creation of a new model.
         :param str state: A filter to return only resources their lifecycleState matches the given lifecycleState.
-        :param Mapping[str, Any] system_tags: System tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"orcl-cloud.free-tier-retained": "true"}`
-        :param str time_created: The date and time that the model was created in the format of an RFC3339 datetime string.
         :param str time_deprecated: Corresponds to the time when the custom model and its associated foundation model will be deprecated.
-        :param str time_updated: The date and time that the model was updated in the format of an RFC3339 datetime string.
         :param str type: The model type indicating whether this is a pretrained/base model or a custom/fine-tuned model.
         :param str vendor: A filter to return only resources that match the entire vendor given.
         :param str version: The version of the model.
@@ -1335,9 +1200,6 @@ class GetModelsModelCollectionItemResult(dict):
     @property
     @pulumi.getter(name="baseModelId")
     def base_model_id(self) -> str:
-        """
-        The OCID of the base model that's used for fine-tuning. For pretrained models, the value is null.
-        """
         return pulumi.get(self, "base_model_id")

     @property
@@ -1367,9 +1229,6 @@ class GetModelsModelCollectionItemResult(dict):
     @property
     @pulumi.getter
     def description(self) -> str:
-        """
-        An optional description of the model.
-        """
         return pulumi.get(self, "description")

     @property
@@ -1383,17 +1242,11 @@ class GetModelsModelCollectionItemResult(dict):
     @property
     @pulumi.getter(name="fineTuneDetails")
     def fine_tune_details(self) -> Sequence['outputs.GetModelsModelCollectionItemFineTuneDetailResult']:
-        """
-        Details about fine-tuning a custom model.
-        """
         return pulumi.get(self, "fine_tune_details")

     @property
     @pulumi.getter(name="freeformTags")
     def freeform_tags(self) -> Mapping[str, Any]:
-        """
-        Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
-        """
         return pulumi.get(self, "freeform_tags")

     @property
@@ -1407,25 +1260,16 @@ class GetModelsModelCollectionItemResult(dict):
     @property
     @pulumi.getter(name="isLongTermSupported")
     def is_long_term_supported(self) -> bool:
-        """
-        Whether a model is supported long-term. Only applicable to base models.
-        """
         return pulumi.get(self, "is_long_term_supported")

     @property
     @pulumi.getter(name="lifecycleDetails")
     def lifecycle_details(self) -> str:
-        """
-        A message describing the current state of the model in more detail that can provide actionable information.
-        """
         return pulumi.get(self, "lifecycle_details")

     @property
     @pulumi.getter(name="modelMetrics")
     def model_metrics(self) -> Sequence['outputs.GetModelsModelCollectionItemModelMetricResult']:
-        """
-        Model metrics during the creation of a new model.
-        """
         return pulumi.get(self, "model_metrics")

     @property
@@ -1439,17 +1283,11 @@ class GetModelsModelCollectionItemResult(dict):
     @property
     @pulumi.getter(name="systemTags")
     def system_tags(self) -> Mapping[str, Any]:
-        """
-        System tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"orcl-cloud.free-tier-retained": "true"}`
-        """
         return pulumi.get(self, "system_tags")

     @property
     @pulumi.getter(name="timeCreated")
     def time_created(self) -> str:
-        """
-        The date and time that the model was created in the format of an RFC3339 datetime string.
-        """
         return pulumi.get(self, "time_created")

     @property
@@ -1463,9 +1301,6 @@ class GetModelsModelCollectionItemResult(dict):
     @property
     @pulumi.getter(name="timeUpdated")
     def time_updated(self) -> str:
-        """
-        The date and time that the model was updated in the format of an RFC3339 datetime string.
-        """
         return pulumi.get(self, "time_updated")

     @property
@@ -1499,11 +1334,6 @@ class GetModelsModelCollectionItemFineTuneDetailResult(dict):
                  dedicated_ai_cluster_id: str,
                  training_configs: Sequence['outputs.GetModelsModelCollectionItemFineTuneDetailTrainingConfigResult'],
                  training_datasets: Sequence['outputs.GetModelsModelCollectionItemFineTuneDetailTrainingDatasetResult']):
-        """
-        :param str dedicated_ai_cluster_id: The OCID of the dedicated AI cluster this fine-tuning runs on.
-        :param Sequence['GetModelsModelCollectionItemFineTuneDetailTrainingConfigArgs'] training_configs: The fine-tuning method and hyperparameters used for fine-tuning a custom model.
-        :param Sequence['GetModelsModelCollectionItemFineTuneDetailTrainingDatasetArgs'] training_datasets: The dataset used to fine-tune the model.
-        """
         pulumi.set(__self__, "dedicated_ai_cluster_id", dedicated_ai_cluster_id)
         pulumi.set(__self__, "training_configs", training_configs)
         pulumi.set(__self__, "training_datasets", training_datasets)
@@ -1511,25 +1341,16 @@ class GetModelsModelCollectionItemFineTuneDetailResult(dict):
     @property
     @pulumi.getter(name="dedicatedAiClusterId")
     def dedicated_ai_cluster_id(self) -> str:
-        """
-        The OCID of the dedicated AI cluster this fine-tuning runs on.
-        """
         return pulumi.get(self, "dedicated_ai_cluster_id")

     @property
     @pulumi.getter(name="trainingConfigs")
     def training_configs(self) -> Sequence['outputs.GetModelsModelCollectionItemFineTuneDetailTrainingConfigResult']:
-        """
-        The fine-tuning method and hyperparameters used for fine-tuning a custom model.
-        """
         return pulumi.get(self, "training_configs")

     @property
     @pulumi.getter(name="trainingDatasets")
     def training_datasets(self) -> Sequence['outputs.GetModelsModelCollectionItemFineTuneDetailTrainingDatasetResult']:
-        """
-        The dataset used to fine-tune the model.
-        """
         return pulumi.get(self, "training_datasets")


@@ -1540,24 +1361,20 @@ class GetModelsModelCollectionItemFineTuneDetailTrainingConfigResult(dict):
                  early_stopping_threshold: float,
                  learning_rate: float,
                  log_model_metrics_interval_in_steps: int,
+                 lora_alpha: int,
+                 lora_dropout: float,
+                 lora_r: int,
                  num_of_last_layers: int,
                  total_training_epochs: int,
                  training_batch_size: int,
                  training_config_type: str):
-        """
-        :param int early_stopping_patience: Stop training if the loss metric does not improve beyond 'early_stopping_threshold' for this many times of evaluation.
-        :param float early_stopping_threshold: How much the loss must improve to prevent early stopping.
-        :param float learning_rate: The initial learning rate to be used during training
-        :param int log_model_metrics_interval_in_steps: Determines how frequently to log model metrics.
-        :param int num_of_last_layers: The number of last layers to be fine-tuned.
-        :param int total_training_epochs: The maximum number of training epochs to run for.
-        :param int training_batch_size: The batch size used during training.
-        :param str training_config_type: The fine-tuning method for training a custom model.
-        """
         pulumi.set(__self__, "early_stopping_patience", early_stopping_patience)
         pulumi.set(__self__, "early_stopping_threshold", early_stopping_threshold)
         pulumi.set(__self__, "learning_rate", learning_rate)
         pulumi.set(__self__, "log_model_metrics_interval_in_steps", log_model_metrics_interval_in_steps)
+        pulumi.set(__self__, "lora_alpha", lora_alpha)
+        pulumi.set(__self__, "lora_dropout", lora_dropout)
+        pulumi.set(__self__, "lora_r", lora_r)
         pulumi.set(__self__, "num_of_last_layers", num_of_last_layers)
         pulumi.set(__self__, "total_training_epochs", total_training_epochs)
         pulumi.set(__self__, "training_batch_size", training_batch_size)
@@ -1566,65 +1383,56 @@ class GetModelsModelCollectionItemFineTuneDetailTrainingConfigResult(dict):
     @property
     @pulumi.getter(name="earlyStoppingPatience")
     def early_stopping_patience(self) -> int:
-        """
-        Stop training if the loss metric does not improve beyond 'early_stopping_threshold' for this many times of evaluation.
-        """
         return pulumi.get(self, "early_stopping_patience")

     @property
     @pulumi.getter(name="earlyStoppingThreshold")
     def early_stopping_threshold(self) -> float:
-        """
-        How much the loss must improve to prevent early stopping.
-        """
         return pulumi.get(self, "early_stopping_threshold")

     @property
     @pulumi.getter(name="learningRate")
     def learning_rate(self) -> float:
-        """
-        The initial learning rate to be used during training
-        """
         return pulumi.get(self, "learning_rate")

     @property
     @pulumi.getter(name="logModelMetricsIntervalInSteps")
     def log_model_metrics_interval_in_steps(self) -> int:
-        """
-        Determines how frequently to log model metrics.
-        """
         return pulumi.get(self, "log_model_metrics_interval_in_steps")

+    @property
+    @pulumi.getter(name="loraAlpha")
+    def lora_alpha(self) -> int:
+        return pulumi.get(self, "lora_alpha")
+
+    @property
+    @pulumi.getter(name="loraDropout")
+    def lora_dropout(self) -> float:
+        return pulumi.get(self, "lora_dropout")
+
+    @property
+    @pulumi.getter(name="loraR")
+    def lora_r(self) -> int:
+        return pulumi.get(self, "lora_r")
+
     @property
     @pulumi.getter(name="numOfLastLayers")
     def num_of_last_layers(self) -> int:
-        """
-        The number of last layers to be fine-tuned.
-        """
         return pulumi.get(self, "num_of_last_layers")

     @property
     @pulumi.getter(name="totalTrainingEpochs")
     def total_training_epochs(self) -> int:
-        """
-        The maximum number of training epochs to run for.
-        """
         return pulumi.get(self, "total_training_epochs")

     @property
     @pulumi.getter(name="trainingBatchSize")
     def training_batch_size(self) -> int:
-        """
-        The batch size used during training.
-        """
         return pulumi.get(self, "training_batch_size")

     @property
     @pulumi.getter(name="trainingConfigType")
     def training_config_type(self) -> str:
-        """
-        The fine-tuning method for training a custom model.
-        """
         return pulumi.get(self, "training_config_type")


@@ -1635,12 +1443,6 @@ class GetModelsModelCollectionItemFineTuneDetailTrainingDatasetResult(dict):
                  dataset_type: str,
                  namespace: str,
                  object: str):
-        """
-        :param str bucket: The Object Storage bucket name.
-        :param str dataset_type: The type of the data asset.
-        :param str namespace: The Object Storage namespace.
-        :param str object: The Object Storage object name.
-        """
         pulumi.set(__self__, "bucket", bucket)
         pulumi.set(__self__, "dataset_type", dataset_type)
         pulumi.set(__self__, "namespace", namespace)
@@ -1649,33 +1451,21 @@ class GetModelsModelCollectionItemFineTuneDetailTrainingDatasetResult(dict):
     @property
     @pulumi.getter
     def bucket(self) -> str:
-        """
-        The Object Storage bucket name.
-        """
         return pulumi.get(self, "bucket")

     @property
     @pulumi.getter(name="datasetType")
     def dataset_type(self) -> str:
-        """
-        The type of the data asset.
-        """
         return pulumi.get(self, "dataset_type")

     @property
     @pulumi.getter
     def namespace(self) -> str:
-        """
-        The Object Storage namespace.
-        """
         return pulumi.get(self, "namespace")

     @property
     @pulumi.getter
     def object(self) -> str:
-        """
-        The Object Storage object name.
-        """
         return pulumi.get(self, "object")


@@ -1685,11 +1475,6 @@ class GetModelsModelCollectionItemModelMetricResult(dict):
                  final_accuracy: float,
                  final_loss: float,
                  model_metrics_type: str):
-        """
-        :param float final_accuracy: Fine-tuned model accuracy.
-        :param float final_loss: Fine-tuned model loss.
-        :param str model_metrics_type: The type of the model metrics. Each type of model can expect a different set of model metrics.
-        """
         pulumi.set(__self__, "final_accuracy", final_accuracy)
         pulumi.set(__self__, "final_loss", final_loss)
         pulumi.set(__self__, "model_metrics_type", model_metrics_type)
@@ -1697,25 +1482,16 @@ class GetModelsModelCollectionItemModelMetricResult(dict):
     @property
     @pulumi.getter(name="finalAccuracy")
     def final_accuracy(self) -> float:
-        """
-        Fine-tuned model accuracy.
-        """
         return pulumi.get(self, "final_accuracy")

     @property
     @pulumi.getter(name="finalLoss")
     def final_loss(self) -> float:
-        """
-        Fine-tuned model loss.
-        """
         return pulumi.get(self, "final_loss")

     @property
     @pulumi.getter(name="modelMetricsType")
     def model_metrics_type(self) -> str:
-        """
-        The type of the model metrics. Each type of model can expect a different set of model metrics.
-        """
         return pulumi.get(self, "model_metrics_type")

```