@pulumi/digitalocean 4.64.0-alpha.1776233604 → 4.64.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/dedicatedInference.d.ts +228 -0
  2. package/dedicatedInference.js +155 -0
  3. package/dedicatedInference.js.map +1 -0
  4. package/dedicatedInferenceToken.d.ts +117 -0
  5. package/dedicatedInferenceToken.js +101 -0
  6. package/dedicatedInferenceToken.js.map +1 -0
  7. package/droplet.d.ts +24 -6
  8. package/droplet.js +2 -0
  9. package/droplet.js.map +1 -1
  10. package/dropletAutoscale.d.ts +1 -0
  11. package/dropletAutoscale.js +1 -0
  12. package/dropletAutoscale.js.map +1 -1
  13. package/getDedicatedInference.d.ts +102 -0
  14. package/getDedicatedInference.js +56 -0
  15. package/getDedicatedInference.js.map +1 -0
  16. package/getDedicatedInferenceAccelerators.d.ts +117 -0
  17. package/getDedicatedInferenceAccelerators.js +88 -0
  18. package/getDedicatedInferenceAccelerators.js.map +1 -0
  19. package/getDedicatedInferenceGpuModelConfig.d.ts +47 -0
  20. package/getDedicatedInferenceGpuModelConfig.js +48 -0
  21. package/getDedicatedInferenceGpuModelConfig.js.map +1 -0
  22. package/getDedicatedInferenceSizes.d.ts +51 -0
  23. package/getDedicatedInferenceSizes.js +48 -0
  24. package/getDedicatedInferenceSizes.js.map +1 -0
  25. package/getDedicatedInferenceTokens.d.ts +123 -0
  26. package/getDedicatedInferenceTokens.js +94 -0
  27. package/getDedicatedInferenceTokens.js.map +1 -0
  28. package/getDedicatedInferences.d.ts +140 -0
  29. package/getDedicatedInferences.js +120 -0
  30. package/getDedicatedInferences.js.map +1 -0
  31. package/index.d.ts +24 -0
  32. package/index.js +34 -5
  33. package/index.js.map +1 -1
  34. package/nfs.d.ts +44 -1
  35. package/nfs.js +44 -1
  36. package/nfs.js.map +1 -1
  37. package/package.json +2 -2
  38. package/types/input.d.ts +204 -0
  39. package/types/output.d.ts +320 -0
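
The headline addition in this release is the Dedicated Inference surface: the `DedicatedInference` and `DedicatedInferenceToken` resources plus the matching `getDedicatedInference*` data sources. For orientation before the raw diff below, here is a minimal sketch of how the two new resources might be wired together. It is adapted from the usage examples embedded in the generated docstrings; the model, accelerator, and region slugs are illustrative values copied from those examples, not a statement of what is available to every account.

```typescript
import * as digitalocean from "@pulumi/digitalocean";

// A dedicated inference endpoint serving one model on a single H100 accelerator.
const endpoint = new digitalocean.DedicatedInference("example", {
    name: "my-inference-endpoint",
    region: "tor1",
    modelDeployments: [{
        modelSlug: "deepseek-r1-distill-qwen-14b",
        modelProvider: "digitalocean",
        accelerators: [{
            acceleratorSlug: "gpu-h100x1-80gb",
            scale: 1,
            type: "nvidia_h100",
        }],
    }],
});

// An API token scoped to that endpoint. The provider marks `token` as a secret
// output, and its value is only returned at creation time.
const token = new digitalocean.DedicatedInferenceToken("example", {
    dedicatedInferenceId: endpoint.id,
    name: "my-api-token",
});

export const inferenceFqdn = endpoint.privateEndpointFqdn;
export const inferenceToken = token.token;
```
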
package/dedicatedInference.d.ts
@@ -0,0 +1,228 @@
+ import * as pulumi from "@pulumi/pulumi";
+ import * as inputs from "./types/input";
+ import * as outputs from "./types/output";
+ /**
+ * Provides a DigitalOcean Dedicated Inference resource. This can be used to create,
+ * modify, and delete dedicated inference endpoints for running GPU-accelerated
+ * model inference.
+ *
+ * ## Example Usage
+ *
+ * ```typescript
+ * import * as pulumi from "@pulumi/pulumi";
+ * import * as digitalocean from "@pulumi/digitalocean";
+ *
+ * const example = new digitalocean.DedicatedInference("example", {
+ * name: "my-inference-endpoint",
+ * region: "tor1",
+ * modelDeployments: [{
+ * modelSlug: "deepseek-r1-distill-qwen-14b",
+ * modelProvider: "digitalocean",
+ * accelerators: [{
+ * acceleratorSlug: "gpu-h100x1-80gb",
+ * scale: 1,
+ * type: "nvidia_h100",
+ * }],
+ * }],
+ * });
+ * ```
+ *
+ * ### With Public Endpoint
+ *
+ * ```typescript
+ * import * as pulumi from "@pulumi/pulumi";
+ * import * as digitalocean from "@pulumi/digitalocean";
+ *
+ * const _public = new digitalocean.DedicatedInference("public", {
+ * name: "my-public-inference",
+ * region: "tor1",
+ * enablePublicEndpoint: true,
+ * modelDeployments: [{
+ * modelSlug: "deepseek-r1-distill-qwen-14b",
+ * modelProvider: "digitalocean",
+ * accelerators: [{
+ * acceleratorSlug: "gpu-h100x1-80gb",
+ * scale: 1,
+ * type: "nvidia_h100",
+ * }],
+ * }],
+ * });
+ * ```
+ *
+ * ### With VPC
+ *
+ * ```typescript
+ * import * as pulumi from "@pulumi/pulumi";
+ * import * as digitalocean from "@pulumi/digitalocean";
+ *
+ * const _private = new digitalocean.DedicatedInference("private", {
+ * name: "my-private-inference",
+ * region: "tor1",
+ * vpcUuid: example.id,
+ * modelDeployments: [{
+ * modelSlug: "deepseek-r1-distill-qwen-14b",
+ * modelProvider: "digitalocean",
+ * accelerators: [{
+ * acceleratorSlug: "gpu-h100x1-80gb",
+ * scale: 1,
+ * type: "nvidia_h100",
+ * }],
+ * }],
+ * });
+ * ```
+ *
+ * ## Import
+ *
+ * Dedicated inference endpoints can be imported using their `id`, e.g.
+ *
+ * ```sh
+ * $ pulumi import digitalocean:index/dedicatedInference:DedicatedInference example endpoint-id
+ * ```
+ */
+ export declare class DedicatedInference extends pulumi.CustomResource {
+ /**
+ * Get an existing DedicatedInference resource's state with the given name, ID, and optional extra
+ * properties used to qualify the lookup.
+ *
+ * @param name The _unique_ name of the resulting resource.
+ * @param id The _unique_ provider ID of the resource to lookup.
+ * @param state Any extra arguments used during the lookup.
+ * @param opts Optional settings to control the behavior of the CustomResource.
+ */
+ static get(name: string, id: pulumi.Input<pulumi.ID>, state?: DedicatedInferenceState, opts?: pulumi.CustomResourceOptions): DedicatedInference;
+ /**
+ * Returns true if the given object is an instance of DedicatedInference. This is designed to work even
+ * when multiple copies of the Pulumi SDK have been loaded into the same process.
+ */
+ static isInstance(obj: any): obj is DedicatedInference;
+ /**
+ * The date and time when the dedicated inference endpoint was created.
+ */
+ readonly createdAt: pulumi.Output<string>;
+ /**
+ * Whether to enable a public HTTPS endpoint for the dedicated inference endpoint. Defaults to `false`. This field is immutable after creation and changing it forces a new resource.
+ */
+ readonly enablePublicEndpoint: pulumi.Output<boolean | undefined>;
+ /**
+ * A HuggingFace token for accessing gated models.
+ */
+ readonly huggingFaceToken: pulumi.Output<string | undefined>;
+ /**
+ * The list of model deployments to run on the dedicated inference endpoint. Each `modelDeployments` block supports:
+ */
+ readonly modelDeployments: pulumi.Output<outputs.DedicatedInferenceModelDeployment[]>;
+ /**
+ * A human-readable name for the dedicated inference endpoint.
+ */
+ readonly name: pulumi.Output<string>;
+ /**
+ * The fully-qualified domain name of the private endpoint.
+ */
+ readonly privateEndpointFqdn: pulumi.Output<string>;
+ /**
+ * The fully-qualified domain name of the public endpoint, if enabled.
+ */
+ readonly publicEndpointFqdn: pulumi.Output<string>;
+ /**
+ * The region slug where the dedicated inference endpoint will be deployed. Changing this forces a new resource.
+ */
+ readonly region: pulumi.Output<string>;
+ /**
+ * The current status of the dedicated inference endpoint.
+ */
+ readonly status: pulumi.Output<string>;
+ /**
+ * The date and time when the dedicated inference endpoint was last updated.
+ */
+ readonly updatedAt: pulumi.Output<string>;
+ /**
+ * The UUID of the VPC to deploy the dedicated inference endpoint into. Changing this forces a new resource.
+ */
+ readonly vpcUuid: pulumi.Output<string | undefined>;
+ /**
+ * Create a DedicatedInference resource with the given unique name, arguments, and options.
+ *
+ * @param name The _unique_ name of the resource.
+ * @param args The arguments to use to populate this resource's properties.
+ * @param opts A bag of options that control this resource's behavior.
+ */
+ constructor(name: string, args: DedicatedInferenceArgs, opts?: pulumi.CustomResourceOptions);
+ }
+ /**
+ * Input properties used for looking up and filtering DedicatedInference resources.
+ */
+ export interface DedicatedInferenceState {
+ /**
+ * The date and time when the dedicated inference endpoint was created.
+ */
+ createdAt?: pulumi.Input<string>;
+ /**
+ * Whether to enable a public HTTPS endpoint for the dedicated inference endpoint. Defaults to `false`. This field is immutable after creation and changing it forces a new resource.
+ */
+ enablePublicEndpoint?: pulumi.Input<boolean>;
+ /**
+ * A HuggingFace token for accessing gated models.
+ */
+ huggingFaceToken?: pulumi.Input<string>;
+ /**
+ * The list of model deployments to run on the dedicated inference endpoint. Each `modelDeployments` block supports:
+ */
+ modelDeployments?: pulumi.Input<pulumi.Input<inputs.DedicatedInferenceModelDeployment>[]>;
+ /**
+ * A human-readable name for the dedicated inference endpoint.
+ */
+ name?: pulumi.Input<string>;
+ /**
+ * The fully-qualified domain name of the private endpoint.
+ */
+ privateEndpointFqdn?: pulumi.Input<string>;
+ /**
+ * The fully-qualified domain name of the public endpoint, if enabled.
+ */
+ publicEndpointFqdn?: pulumi.Input<string>;
+ /**
+ * The region slug where the dedicated inference endpoint will be deployed. Changing this forces a new resource.
+ */
+ region?: pulumi.Input<string>;
+ /**
+ * The current status of the dedicated inference endpoint.
+ */
+ status?: pulumi.Input<string>;
+ /**
+ * The date and time when the dedicated inference endpoint was last updated.
+ */
+ updatedAt?: pulumi.Input<string>;
+ /**
+ * The UUID of the VPC to deploy the dedicated inference endpoint into. Changing this forces a new resource.
+ */
+ vpcUuid?: pulumi.Input<string>;
+ }
+ /**
+ * The set of arguments for constructing a DedicatedInference resource.
+ */
+ export interface DedicatedInferenceArgs {
+ /**
+ * Whether to enable a public HTTPS endpoint for the dedicated inference endpoint. Defaults to `false`. This field is immutable after creation and changing it forces a new resource.
+ */
+ enablePublicEndpoint?: pulumi.Input<boolean>;
+ /**
+ * A HuggingFace token for accessing gated models.
+ */
+ huggingFaceToken?: pulumi.Input<string>;
+ /**
+ * The list of model deployments to run on the dedicated inference endpoint. Each `modelDeployments` block supports:
+ */
+ modelDeployments: pulumi.Input<pulumi.Input<inputs.DedicatedInferenceModelDeployment>[]>;
+ /**
+ * A human-readable name for the dedicated inference endpoint.
+ */
+ name?: pulumi.Input<string>;
+ /**
+ * The region slug where the dedicated inference endpoint will be deployed. Changing this forces a new resource.
+ */
+ region: pulumi.Input<string>;
+ /**
+ * The UUID of the VPC to deploy the dedicated inference endpoint into. Changing this forces a new resource.
+ */
+ vpcUuid?: pulumi.Input<string>;
+ }
package/dedicatedInference.js
@@ -0,0 +1,155 @@
+ "use strict";
+ // *** WARNING: this file was generated by pulumi-language-nodejs. ***
+ // *** Do not edit by hand unless you're certain you know what you are doing! ***
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.DedicatedInference = void 0;
+ const pulumi = require("@pulumi/pulumi");
+ const utilities = require("./utilities");
+ /**
+ * Provides a DigitalOcean Dedicated Inference resource. This can be used to create,
+ * modify, and delete dedicated inference endpoints for running GPU-accelerated
+ * model inference.
+ *
+ * ## Example Usage
+ *
+ * ```typescript
+ * import * as pulumi from "@pulumi/pulumi";
+ * import * as digitalocean from "@pulumi/digitalocean";
+ *
+ * const example = new digitalocean.DedicatedInference("example", {
+ * name: "my-inference-endpoint",
+ * region: "tor1",
+ * modelDeployments: [{
+ * modelSlug: "deepseek-r1-distill-qwen-14b",
+ * modelProvider: "digitalocean",
+ * accelerators: [{
+ * acceleratorSlug: "gpu-h100x1-80gb",
+ * scale: 1,
+ * type: "nvidia_h100",
+ * }],
+ * }],
+ * });
+ * ```
+ *
+ * ### With Public Endpoint
+ *
+ * ```typescript
+ * import * as pulumi from "@pulumi/pulumi";
+ * import * as digitalocean from "@pulumi/digitalocean";
+ *
+ * const _public = new digitalocean.DedicatedInference("public", {
+ * name: "my-public-inference",
+ * region: "tor1",
+ * enablePublicEndpoint: true,
+ * modelDeployments: [{
+ * modelSlug: "deepseek-r1-distill-qwen-14b",
+ * modelProvider: "digitalocean",
+ * accelerators: [{
+ * acceleratorSlug: "gpu-h100x1-80gb",
+ * scale: 1,
+ * type: "nvidia_h100",
+ * }],
+ * }],
+ * });
+ * ```
+ *
+ * ### With VPC
+ *
+ * ```typescript
+ * import * as pulumi from "@pulumi/pulumi";
+ * import * as digitalocean from "@pulumi/digitalocean";
+ *
+ * const _private = new digitalocean.DedicatedInference("private", {
+ * name: "my-private-inference",
+ * region: "tor1",
+ * vpcUuid: example.id,
+ * modelDeployments: [{
+ * modelSlug: "deepseek-r1-distill-qwen-14b",
+ * modelProvider: "digitalocean",
+ * accelerators: [{
+ * acceleratorSlug: "gpu-h100x1-80gb",
+ * scale: 1,
+ * type: "nvidia_h100",
+ * }],
+ * }],
+ * });
+ * ```
+ *
+ * ## Import
+ *
+ * Dedicated inference endpoints can be imported using their `id`, e.g.
+ *
+ * ```sh
+ * $ pulumi import digitalocean:index/dedicatedInference:DedicatedInference example endpoint-id
+ * ```
+ */
+ class DedicatedInference extends pulumi.CustomResource {
+ /**
+ * Get an existing DedicatedInference resource's state with the given name, ID, and optional extra
+ * properties used to qualify the lookup.
+ *
+ * @param name The _unique_ name of the resulting resource.
+ * @param id The _unique_ provider ID of the resource to lookup.
+ * @param state Any extra arguments used during the lookup.
+ * @param opts Optional settings to control the behavior of the CustomResource.
+ */
+ static get(name, id, state, opts) {
+ return new DedicatedInference(name, state, { ...opts, id: id });
+ }
+ /**
+ * Returns true if the given object is an instance of DedicatedInference. This is designed to work even
+ * when multiple copies of the Pulumi SDK have been loaded into the same process.
+ */
+ static isInstance(obj) {
+ if (obj === undefined || obj === null) {
+ return false;
+ }
+ return obj['__pulumiType'] === DedicatedInference.__pulumiType;
+ }
+ constructor(name, argsOrState, opts) {
+ let resourceInputs = {};
+ opts = opts || {};
+ if (opts.id) {
+ const state = argsOrState;
+ resourceInputs["createdAt"] = state?.createdAt;
+ resourceInputs["enablePublicEndpoint"] = state?.enablePublicEndpoint;
+ resourceInputs["huggingFaceToken"] = state?.huggingFaceToken;
+ resourceInputs["modelDeployments"] = state?.modelDeployments;
+ resourceInputs["name"] = state?.name;
+ resourceInputs["privateEndpointFqdn"] = state?.privateEndpointFqdn;
+ resourceInputs["publicEndpointFqdn"] = state?.publicEndpointFqdn;
+ resourceInputs["region"] = state?.region;
+ resourceInputs["status"] = state?.status;
+ resourceInputs["updatedAt"] = state?.updatedAt;
+ resourceInputs["vpcUuid"] = state?.vpcUuid;
+ }
+ else {
+ const args = argsOrState;
+ if (args?.modelDeployments === undefined && !opts.urn) {
+ throw new Error("Missing required property 'modelDeployments'");
+ }
+ if (args?.region === undefined && !opts.urn) {
+ throw new Error("Missing required property 'region'");
+ }
+ resourceInputs["enablePublicEndpoint"] = args?.enablePublicEndpoint;
+ resourceInputs["huggingFaceToken"] = args?.huggingFaceToken ? pulumi.secret(args.huggingFaceToken) : undefined;
+ resourceInputs["modelDeployments"] = args?.modelDeployments;
+ resourceInputs["name"] = args?.name;
+ resourceInputs["region"] = args?.region;
+ resourceInputs["vpcUuid"] = args?.vpcUuid;
+ resourceInputs["createdAt"] = undefined /*out*/;
+ resourceInputs["privateEndpointFqdn"] = undefined /*out*/;
+ resourceInputs["publicEndpointFqdn"] = undefined /*out*/;
+ resourceInputs["status"] = undefined /*out*/;
+ resourceInputs["updatedAt"] = undefined /*out*/;
+ }
+ opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts);
+ const secretOpts = { additionalSecretOutputs: ["huggingFaceToken"] };
+ opts = pulumi.mergeOptions(opts, secretOpts);
+ super(DedicatedInference.__pulumiType, name, resourceInputs, opts);
+ }
+ }
+ exports.DedicatedInference = DedicatedInference;
+ /** @internal */
+ DedicatedInference.__pulumiType = 'digitalocean:index/dedicatedInference:DedicatedInference';
+ //# sourceMappingURL=dedicatedInference.js.map
package/dedicatedInference.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"dedicatedInference.js","sourceRoot":"","sources":["../dedicatedInference.ts"],"names":[],"mappings":";AAAA,sEAAsE;AACtE,iFAAiF;;;AAEjF,yCAAyC;AAIzC,yCAAyC;AAEzC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6EG;AACH,MAAa,kBAAmB,SAAQ,MAAM,CAAC,cAAc;IACzD;;;;;;;;OAQG;IACI,MAAM,CAAC,GAAG,CAAC,IAAY,EAAE,EAA2B,EAAE,KAA+B,EAAE,IAAmC;QAC7H,OAAO,IAAI,kBAAkB,CAAC,IAAI,EAAO,KAAK,EAAE,EAAE,GAAG,IAAI,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC;IACzE,CAAC;IAKD;;;OAGG;IACI,MAAM,CAAC,UAAU,CAAC,GAAQ;QAC7B,IAAI,GAAG,KAAK,SAAS,IAAI,GAAG,KAAK,IAAI,EAAE;YACnC,OAAO,KAAK,CAAC;SAChB;QACD,OAAO,GAAG,CAAC,cAAc,CAAC,KAAK,kBAAkB,CAAC,YAAY,CAAC;IACnE,CAAC;IAuDD,YAAY,IAAY,EAAE,WAA8D,EAAE,IAAmC;QACzH,IAAI,cAAc,GAAkB,EAAE,CAAC;QACvC,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QAClB,IAAI,IAAI,CAAC,EAAE,EAAE;YACT,MAAM,KAAK,GAAG,WAAkD,CAAC;YACjE,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,EAAE,SAAS,CAAC;YAC/C,cAAc,CAAC,sBAAsB,CAAC,GAAG,KAAK,EAAE,oBAAoB,CAAC;YACrE,cAAc,CAAC,kBAAkB,CAAC,GAAG,KAAK,EAAE,gBAAgB,CAAC;YAC7D,cAAc,CAAC,kBAAkB,CAAC,GAAG,KAAK,EAAE,gBAAgB,CAAC;YAC7D,cAAc,CAAC,MAAM,CAAC,GAAG,KAAK,EAAE,IAAI,CAAC;YACrC,cAAc,CAAC,qBAAqB,CAAC,GAAG,KAAK,EAAE,mBAAmB,CAAC;YACnE,cAAc,CAAC,oBAAoB,CAAC,GAAG,KAAK,EAAE,kBAAkB,CAAC;YACjE,cAAc,CAAC,QAAQ,CAAC,GAAG,KAAK,EAAE,MAAM,CAAC;YACzC,cAAc,CAAC,QAAQ,CAAC,GAAG,KAAK,EAAE,MAAM,CAAC;YACzC,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,EAAE,SAAS,CAAC;YAC/C,cAAc,CAAC,SAAS,CAAC,GAAG,KAAK,EAAE,OAAO,CAAC;SAC9C;aAAM;YACH,MAAM,IAAI,GAAG,WAAiD,CAAC;YAC/D,IAAI,IAAI,EAAE,gBAAgB,KAAK,SAAS,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE;gBACnD,MAAM,IAAI,KAAK,CAAC,8CAA8C,CAAC,CAAC;aACnE;YACD,IAAI,IAAI,EAAE,MAAM,KAAK,SAAS,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE;gBACzC,MAAM,IAAI,KAAK,CAAC,oCAAoC,CAAC,CAAC;aACzD;YACD,cAAc,CAAC,sBAAsB,CAAC,GAAG,IAAI,EAAE,oBAAoB,CAAC;YACpE,cAAc,CAAC,kBAAkB,CAAC,GAAG,IAAI,EAAE,gBAAgB,CAAC,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;YAC/G,cAAc,CAAC,kBAAkB,CAAC,GAAG,IAAI,EAAE,gBAAgB,CAAC;YAC5D,cAAc,CAAC,MAAM,CAAC,GAAG,IAAI,EAAE,IAAI,CAAC;YACpC,cAAc,CAAC,QAAQ,CAAC,GAAG,IAAI,EAAE,MAAM,CAAC;YACxC,cAAc,CAAC,SAAS,CAAC,GAAG,IAAI,EAAE,OAAO,CAAC;YAC1C,cAAc,CAAC,WAAW,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;YAChD,cAAc,CAAC,qBAAqB,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;YAC1D,cAAc,CAAC,oBAAoB,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;YACzD,cAAc,CAAC,QAAQ,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;YAC7C,cAAc,CAAC,WAAW,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;SACnD;QACD,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,SAAS,CAAC,oBAAoB,EAAE,EAAE,IAAI,CAAC,CAAC;QACnE,MAAM,UAAU,GAAG,EAAE,uBAAuB,EAAE,CAAC,kBAAkB,CAAC,EAAE,CAAC;QACrE,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,IAAI,EAAE,UAAU,CAAC,CAAC;QAC7C,KAAK,CAAC,kBAAkB,CAAC,YAAY,EAAE,IAAI,EAAE,cAAc,EAAE,IAAI,CAAC,CAAC;IACvE,CAAC;;AAzHL,gDA0HC;AA5GG,gBAAgB;AACO,+BAAY,GAAG,0DAA0D,CAAC"}
package/dedicatedInferenceToken.d.ts
@@ -0,0 +1,117 @@
+ import * as pulumi from "@pulumi/pulumi";
+ /**
+ * Provides a DigitalOcean Dedicated Inference Token resource. This can be used to
+ * create and revoke API tokens for dedicated inference endpoints.
+ *
+ * > **Note:** The `token` attribute is only available immediately after creation
+ * and cannot be retrieved afterwards. Make sure to store it securely.
+ *
+ * ## Example Usage
+ *
+ * ```typescript
+ * import * as pulumi from "@pulumi/pulumi";
+ * import * as digitalocean from "@pulumi/digitalocean";
+ *
+ * const example = new digitalocean.DedicatedInference("example", {
+ * name: "my-inference-endpoint",
+ * region: "tor1",
+ * modelDeployments: [{
+ * modelSlug: "deepseek-r1-distill-qwen-14b",
+ * modelProvider: "digitalocean",
+ * accelerators: [{
+ * acceleratorSlug: "gpu-h100x1-80gb",
+ * scale: 1,
+ * type: "nvidia_h100",
+ * }],
+ * }],
+ * });
+ * const exampleDedicatedInferenceToken = new digitalocean.DedicatedInferenceToken("example", {
+ * dedicatedInferenceId: example.id,
+ * name: "my-api-token",
+ * });
+ * ```
+ *
+ * ## Import
+ *
+ * Dedicated inference tokens can be imported using the composite ID
+ * `{dedicated_inference_id}:{token_id}`, e.g.
+ *
+ * ```sh
+ * $ pulumi import digitalocean:index/dedicatedInferenceToken:DedicatedInferenceToken example endpoint-id:token-id
+ * ```
+ */
+ export declare class DedicatedInferenceToken extends pulumi.CustomResource {
+ /**
+ * Get an existing DedicatedInferenceToken resource's state with the given name, ID, and optional extra
+ * properties used to qualify the lookup.
+ *
+ * @param name The _unique_ name of the resulting resource.
+ * @param id The _unique_ provider ID of the resource to lookup.
+ * @param state Any extra arguments used during the lookup.
+ * @param opts Optional settings to control the behavior of the CustomResource.
+ */
+ static get(name: string, id: pulumi.Input<pulumi.ID>, state?: DedicatedInferenceTokenState, opts?: pulumi.CustomResourceOptions): DedicatedInferenceToken;
+ /**
+ * Returns true if the given object is an instance of DedicatedInferenceToken. This is designed to work even
+ * when multiple copies of the Pulumi SDK have been loaded into the same process.
+ */
+ static isInstance(obj: any): obj is DedicatedInferenceToken;
+ /**
+ * The date and time when the token was created.
+ */
+ readonly createdAt: pulumi.Output<string>;
+ /**
+ * The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
+ */
+ readonly dedicatedInferenceId: pulumi.Output<string>;
+ /**
+ * A human-readable name for the token. Changing this forces a new resource.
+ */
+ readonly name: pulumi.Output<string>;
+ /**
+ * (Sensitive) The token value. Only available immediately after creation and not retrievable afterwards.
+ */
+ readonly token: pulumi.Output<string>;
+ /**
+ * Create a DedicatedInferenceToken resource with the given unique name, arguments, and options.
+ *
+ * @param name The _unique_ name of the resource.
+ * @param args The arguments to use to populate this resource's properties.
+ * @param opts A bag of options that control this resource's behavior.
+ */
+ constructor(name: string, args: DedicatedInferenceTokenArgs, opts?: pulumi.CustomResourceOptions);
+ }
+ /**
+ * Input properties used for looking up and filtering DedicatedInferenceToken resources.
+ */
+ export interface DedicatedInferenceTokenState {
+ /**
+ * The date and time when the token was created.
+ */
+ createdAt?: pulumi.Input<string>;
+ /**
+ * The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
+ */
+ dedicatedInferenceId?: pulumi.Input<string>;
+ /**
+ * A human-readable name for the token. Changing this forces a new resource.
+ */
+ name?: pulumi.Input<string>;
+ /**
+ * (Sensitive) The token value. Only available immediately after creation and not retrievable afterwards.
+ */
+ token?: pulumi.Input<string>;
+ }
+ /**
+ * The set of arguments for constructing a DedicatedInferenceToken resource.
+ */
+ export interface DedicatedInferenceTokenArgs {
+ /**
+ * The ID of the dedicated inference endpoint this token belongs to. Changing this forces a new resource.
+ */
+ dedicatedInferenceId: pulumi.Input<string>;
+ /**
+ * A human-readable name for the token. Changing this forces a new resource.
+ */
+ name?: pulumi.Input<string>;
+ }
package/dedicatedInferenceToken.js
@@ -0,0 +1,101 @@
+ "use strict";
+ // *** WARNING: this file was generated by pulumi-language-nodejs. ***
+ // *** Do not edit by hand unless you're certain you know what you are doing! ***
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.DedicatedInferenceToken = void 0;
+ const pulumi = require("@pulumi/pulumi");
+ const utilities = require("./utilities");
+ /**
+ * Provides a DigitalOcean Dedicated Inference Token resource. This can be used to
+ * create and revoke API tokens for dedicated inference endpoints.
+ *
+ * > **Note:** The `token` attribute is only available immediately after creation
+ * and cannot be retrieved afterwards. Make sure to store it securely.
+ *
+ * ## Example Usage
+ *
+ * ```typescript
+ * import * as pulumi from "@pulumi/pulumi";
+ * import * as digitalocean from "@pulumi/digitalocean";
+ *
+ * const example = new digitalocean.DedicatedInference("example", {
+ * name: "my-inference-endpoint",
+ * region: "tor1",
+ * modelDeployments: [{
+ * modelSlug: "deepseek-r1-distill-qwen-14b",
+ * modelProvider: "digitalocean",
+ * accelerators: [{
+ * acceleratorSlug: "gpu-h100x1-80gb",
+ * scale: 1,
+ * type: "nvidia_h100",
+ * }],
+ * }],
+ * });
+ * const exampleDedicatedInferenceToken = new digitalocean.DedicatedInferenceToken("example", {
+ * dedicatedInferenceId: example.id,
+ * name: "my-api-token",
+ * });
+ * ```
+ *
+ * ## Import
+ *
+ * Dedicated inference tokens can be imported using the composite ID
+ * `{dedicated_inference_id}:{token_id}`, e.g.
+ *
+ * ```sh
+ * $ pulumi import digitalocean:index/dedicatedInferenceToken:DedicatedInferenceToken example endpoint-id:token-id
+ * ```
+ */
+ class DedicatedInferenceToken extends pulumi.CustomResource {
+ /**
+ * Get an existing DedicatedInferenceToken resource's state with the given name, ID, and optional extra
+ * properties used to qualify the lookup.
+ *
+ * @param name The _unique_ name of the resulting resource.
+ * @param id The _unique_ provider ID of the resource to lookup.
+ * @param state Any extra arguments used during the lookup.
+ * @param opts Optional settings to control the behavior of the CustomResource.
+ */
+ static get(name, id, state, opts) {
+ return new DedicatedInferenceToken(name, state, { ...opts, id: id });
+ }
+ /**
+ * Returns true if the given object is an instance of DedicatedInferenceToken. This is designed to work even
+ * when multiple copies of the Pulumi SDK have been loaded into the same process.
+ */
+ static isInstance(obj) {
+ if (obj === undefined || obj === null) {
+ return false;
+ }
+ return obj['__pulumiType'] === DedicatedInferenceToken.__pulumiType;
+ }
+ constructor(name, argsOrState, opts) {
+ let resourceInputs = {};
+ opts = opts || {};
+ if (opts.id) {
+ const state = argsOrState;
+ resourceInputs["createdAt"] = state?.createdAt;
+ resourceInputs["dedicatedInferenceId"] = state?.dedicatedInferenceId;
+ resourceInputs["name"] = state?.name;
+ resourceInputs["token"] = state?.token;
+ }
+ else {
+ const args = argsOrState;
+ if (args?.dedicatedInferenceId === undefined && !opts.urn) {
+ throw new Error("Missing required property 'dedicatedInferenceId'");
+ }
+ resourceInputs["dedicatedInferenceId"] = args?.dedicatedInferenceId;
+ resourceInputs["name"] = args?.name;
+ resourceInputs["createdAt"] = undefined /*out*/;
+ resourceInputs["token"] = undefined /*out*/;
+ }
+ opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts);
+ const secretOpts = { additionalSecretOutputs: ["token"] };
+ opts = pulumi.mergeOptions(opts, secretOpts);
+ super(DedicatedInferenceToken.__pulumiType, name, resourceInputs, opts);
+ }
+ }
+ exports.DedicatedInferenceToken = DedicatedInferenceToken;
+ /** @internal */
+ DedicatedInferenceToken.__pulumiType = 'digitalocean:index/dedicatedInferenceToken:DedicatedInferenceToken';
+ //# sourceMappingURL=dedicatedInferenceToken.js.map
package/dedicatedInferenceToken.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"dedicatedInferenceToken.js","sourceRoot":"","sources":["../dedicatedInferenceToken.ts"],"names":[],"mappings":";AAAA,sEAAsE;AACtE,iFAAiF;;;AAEjF,yCAAyC;AACzC,yCAAyC;AAEzC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAwCG;AACH,MAAa,uBAAwB,SAAQ,MAAM,CAAC,cAAc;IAC9D;;;;;;;;OAQG;IACI,MAAM,CAAC,GAAG,CAAC,IAAY,EAAE,EAA2B,EAAE,KAAoC,EAAE,IAAmC;QAClI,OAAO,IAAI,uBAAuB,CAAC,IAAI,EAAO,KAAK,EAAE,EAAE,GAAG,IAAI,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,CAAC;IAC9E,CAAC;IAKD;;;OAGG;IACI,MAAM,CAAC,UAAU,CAAC,GAAQ;QAC7B,IAAI,GAAG,KAAK,SAAS,IAAI,GAAG,KAAK,IAAI,EAAE;YACnC,OAAO,KAAK,CAAC;SAChB;QACD,OAAO,GAAG,CAAC,cAAc,CAAC,KAAK,uBAAuB,CAAC,YAAY,CAAC;IACxE,CAAC;IA2BD,YAAY,IAAY,EAAE,WAAwE,EAAE,IAAmC;QACnI,IAAI,cAAc,GAAkB,EAAE,CAAC;QACvC,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QAClB,IAAI,IAAI,CAAC,EAAE,EAAE;YACT,MAAM,KAAK,GAAG,WAAuD,CAAC;YACtE,cAAc,CAAC,WAAW,CAAC,GAAG,KAAK,EAAE,SAAS,CAAC;YAC/C,cAAc,CAAC,sBAAsB,CAAC,GAAG,KAAK,EAAE,oBAAoB,CAAC;YACrE,cAAc,CAAC,MAAM,CAAC,GAAG,KAAK,EAAE,IAAI,CAAC;YACrC,cAAc,CAAC,OAAO,CAAC,GAAG,KAAK,EAAE,KAAK,CAAC;SAC1C;aAAM;YACH,MAAM,IAAI,GAAG,WAAsD,CAAC;YACpE,IAAI,IAAI,EAAE,oBAAoB,KAAK,SAAS,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE;gBACvD,MAAM,IAAI,KAAK,CAAC,kDAAkD,CAAC,CAAC;aACvE;YACD,cAAc,CAAC,sBAAsB,CAAC,GAAG,IAAI,EAAE,oBAAoB,CAAC;YACpE,cAAc,CAAC,MAAM,CAAC,GAAG,IAAI,EAAE,IAAI,CAAC;YACpC,cAAc,CAAC,WAAW,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;YAChD,cAAc,CAAC,OAAO,CAAC,GAAG,SAAS,CAAC,OAAO,CAAC;SAC/C;QACD,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,SAAS,CAAC,oBAAoB,EAAE,EAAE,IAAI,CAAC,CAAC;QACnE,MAAM,UAAU,GAAG,EAAE,uBAAuB,EAAE,CAAC,OAAO,CAAC,EAAE,CAAC;QAC1D,IAAI,GAAG,MAAM,CAAC,YAAY,CAAC,IAAI,EAAE,UAAU,CAAC,CAAC;QAC7C,KAAK,CAAC,uBAAuB,CAAC,YAAY,EAAE,IAAI,EAAE,cAAc,EAAE,IAAI,CAAC,CAAC;IAC5E,CAAC;;AA5EL,0DA6EC;AA/DG,gBAAgB;AACO,oCAAY,GAAG,oEAAoE,CAAC"}