@flyteorg/flyteidl 1.3.0 → 1.3.2

This diff shows the changes between publicly released versions of this package as they appear in its public registry. It is provided for informational purposes only.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@flyteorg/flyteidl",
-  "version": "1.3.0",
+  "version": "1.3.2",
   "description": "Compiled protocol buffers and gRPC service clients/servers for Flyte IDLs",
   "repository": {
     "type": "git",
@@ -38,6 +38,100 @@ will be executed concurrently.
 
 
 
+..
+   end messages
+
+
+..
+   end enums
+
+
+..
+   end HasExtensions
+
+
+..
+   end services
+
+
+
+
+.. _ref_flyteidl/plugins/dask.proto:
+
+flyteidl/plugins/dask.proto
+==================================================================
+
+
+
+
+
+.. _ref_flyteidl.plugins.DaskCluster:
+
+DaskCluster
+------------------------------------------------------------------
+
+
+
+
+
+.. csv-table:: DaskCluster type fields
+   :header: "Field", "Type", "Label", "Description"
+   :widths: auto
+
+   "image", ":ref:`ref_string`", "", "Optional image to use for the scheduler as well as the default worker group. If unset, will use the default image."
+   "nWorkers", ":ref:`ref_int32`", "", "Number of workers in the default worker group"
+   "resources", ":ref:`ref_flyteidl.core.Resources`", "", "Resources assigned to the scheduler as well as all pods of the default worker group. As per https://kubernetes.dask.org/en/latest/kubecluster.html?highlight=limit#best-practices it is advised to only set limits. If requests are not explicitly set, the plugin will make sure to set requests==limits. The plugin sets `--memory-limit` as well as `--nthreads` for the workers according to the limit."
+
+
+
+
+
+
+
+.. _ref_flyteidl.plugins.DaskJob:
+
+DaskJob
+------------------------------------------------------------------
+
+Custom Proto for Dask Plugin
+
+
+
+.. csv-table:: DaskJob type fields
+   :header: "Field", "Type", "Label", "Description"
+   :widths: auto
+
+   "namespace", ":ref:`ref_string`", "", "Optional namespace to use for the dask pods. If none is given, the namespace of the Flyte task is used"
+   "jobPodSpec", ":ref:`ref_flyteidl.plugins.JobPodSpec`", "", "Spec for the job pod"
+   "cluster", ":ref:`ref_flyteidl.plugins.DaskCluster`", "", "Cluster"
+
+
+
+
+
+
+
+.. _ref_flyteidl.plugins.JobPodSpec:
+
+JobPodSpec
+------------------------------------------------------------------
+
+Specification for the job pod
+
+
+
+.. csv-table:: JobPodSpec type fields
+   :header: "Field", "Type", "Label", "Description"
+   :widths: auto
+
+   "image", ":ref:`ref_string`", "", "Optional image to use. If unset, will use the default image."
+   "resources", ":ref:`ref_flyteidl.core.Resources`", "", "Resources assigned to the job pod."
+
+
+
+
+
+
 ..
    end messages
 
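The resources rows above encode a real usage constraint: per the linked Dask best practices, set only limits and let the plugin mirror them into requests. Below is a hedged TypeScript sketch of a config fragment following the documented DaskCluster field names; every value (image, worker count, resource amounts) is illustrative, not taken from the package.

    // Config fragment following the DaskCluster fields documented above.
    // Only limits are set; the docs describe the plugin setting
    // requests == limits and deriving --memory-limit / --nthreads
    // for the workers from those limits.
    const daskCluster = {
      image: "ghcr.io/example/dask:latest", // hypothetical image
      nWorkers: 10,
      resources: {
        limits: [
          { name: "CPU", value: "2" },
          { name: "MEMORY", value: "8Gi" },
        ],
      },
    };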
@@ -501,6 +595,7 @@ Custom Proto for Spark Plugin.
    "sparkConf", ":ref:`ref_flyteidl.plugins.SparkJob.SparkConfEntry`", "repeated", ""
    "hadoopConf", ":ref:`ref_flyteidl.plugins.SparkJob.HadoopConfEntry`", "repeated", ""
    "executorPath", ":ref:`ref_string`", "", "Executor path for Python jobs."
+   "databricksConf", ":ref:`ref_string`", "", "databricksConf is a base64-encoded string that stores the Databricks job configuration. The config structure can be found at https://docs.databricks.com/dev-tools/api/2.0/jobs.html#request-structure. The config is automatically encoded by flytekit and decoded in the propeller."
 
 
 
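The row above describes databricksConf as a base64-encoded JSON payload that flytekit encodes and the propeller decodes (the proto further down models it as a google.protobuf.Struct instead). A minimal Node/TypeScript sketch of that encode/decode round trip, with a made-up config payload:

    // Hypothetical Databricks job config; see the request-structure docs linked above.
    const databricksJobConfig = {
      run_name: "flyte-spark-task",
      new_cluster: { spark_version: "11.0.x-scala2.12", num_workers: 4 },
    };

    // Encode the way the docs row describes (what flytekit would do)...
    const databricksConf = Buffer.from(
      JSON.stringify(databricksJobConfig),
      "utf-8"
    ).toString("base64");

    // ...and decode it again (what the propeller would do).
    const decoded = JSON.parse(
      Buffer.from(databricksConf, "base64").toString("utf-8")
    );
    console.log(databricksConf, decoded.run_name);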
@@ -203,34 +203,55 @@ message UpdateArtifactResponse {
  * ReservationID message that is composed of several string fields.
  */
 message ReservationID {
+    // The unique ID for the reserved dataset
     DatasetID dataset_id = 1;
+
+    // The specific artifact tag for the reservation
     string tag_name = 2;
 }
 
 // Try to acquire or extend an artifact reservation. If an active reservation exists, retrieve that instance.
 message GetOrExtendReservationRequest {
+    // The unique ID for the reservation
     ReservationID reservation_id = 1;
+
+    // The unique ID of the owner for the reservation
     string owner_id = 2;
-    google.protobuf.Duration heartbeat_interval = 3; // Requested reservation extension heartbeat interval
+
+    // Requested reservation extension heartbeat interval
+    google.protobuf.Duration heartbeat_interval = 3;
 }
 
 // A reservation including owner, heartbeat interval, expiration timestamp, and various metadata.
 message Reservation {
+    // The unique ID for the reservation
     ReservationID reservation_id = 1;
+
+    // The unique ID of the owner for the reservation
     string owner_id = 2;
-    google.protobuf.Duration heartbeat_interval = 3; // Recommended heartbeat interval to extend reservation
-    google.protobuf.Timestamp expires_at = 4; // Expiration timestamp of this reservation
+
+    // Recommended heartbeat interval to extend reservation
+    google.protobuf.Duration heartbeat_interval = 3;
+
+    // Expiration timestamp of this reservation
+    google.protobuf.Timestamp expires_at = 4;
+
+    // Free-form metadata associated with the artifact
     Metadata metadata = 6;
 }
 
 // Response including either a newly minted reservation or the existing reservation
 message GetOrExtendReservationResponse {
+    // The reservation to be acquired or extended
     Reservation reservation = 1;
 }
 
 // Request to release reservation
 message ReleaseReservationRequest {
+    // The unique ID for the reservation
     ReservationID reservation_id = 1;
+
+    // The unique ID of the owner for the reservation
     string owner_id = 2;
 }
 
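Taken together, these messages describe a lease-style protocol: acquire (or re-acquire) a reservation, extend it on the recommended heartbeat interval, and release it when done. A hedged TypeScript sketch of that lifecycle follows; only the RPC names (GetOrExtendReservation, ReleaseReservation) come from the service, while the client interface and helper are stand-ins, not the generated DataCatalog client API.

    // Stand-in for a generated DataCatalog client; the shape is assumed.
    interface ReservationClient {
      getOrExtendReservation(req: object): Promise<{ reservation: object }>;
      releaseReservation(req: object): Promise<void>;
    }

    // Run `work` while holding (and heartbeating) an artifact reservation.
    async function withReservation(
      client: ReservationClient,
      datasetId: object,
      tagName: string,
      ownerId: string,
      work: () => Promise<void>
    ): Promise<void> {
      const reservationId = { datasetId, tagName };
      const req = { reservationId, ownerId, heartbeatInterval: { seconds: 10 } };

      // Acquire the reservation (or the existing active one).
      await client.getOrExtendReservation(req);
      // Extend it on the heartbeat interval until the work finishes.
      const timer = setInterval(() => void client.getOrExtendReservation(req), 10_000);
      try {
        await work();
      } finally {
        clearInterval(timer);
        await client.releaseReservation({ reservationId, ownerId });
      }
    }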
@@ -0,0 +1,41 @@
+syntax = "proto3";
+
+import "flyteidl/core/tasks.proto";
+
+package flyteidl.plugins;
+
+option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins";
+
+
+// Custom Proto for Dask Plugin.
+message DaskJob {
+    // Spec for the scheduler pod.
+    DaskScheduler scheduler = 1;
+
+    // Spec of the default worker group.
+    DaskWorkerGroup workers = 2;
+}
+
+// Specification for the scheduler pod.
+message DaskScheduler {
+    // Optional image to use. If unset, will use the default image.
+    string image = 1;
+
+    // Resources assigned to the scheduler pod.
+    core.Resources resources = 2;
+}
+
+message DaskWorkerGroup {
+    // Number of workers in the group.
+    uint32 number_of_workers = 1;
+
+    // Optional image to use for the pods of the worker group. If unset, will use the default image.
+    string image = 2;
+
+    // Resources assigned to all pods of the worker group.
+    // As per https://kubernetes.dask.org/en/latest/kubecluster.html?highlight=limit#best-practices
+    // it is advised to only set limits. If requests are not explicitly set, the plugin will make
+    // sure to set requests==limits.
+    // The plugin sets `--memory-limit` as well as `--nthreads` for the workers according to the limit.
+    core.Resources resources = 3;
+}
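Note that the message shapes here (a DaskJob split into a scheduler and a default worker group) differ from the generated docs earlier in the diff, which still describe the older DaskCluster/JobPodSpec layout. A plain-object TypeScript sketch of this proto's JSON field-name form, with all values illustrative:

    // DaskJob per the proto above, in JSON field-name form; values are made up.
    const daskJob = {
      scheduler: {
        image: "ghcr.io/example/dask:latest", // hypothetical image
        resources: { limits: [{ name: "MEMORY", value: "4Gi" }] },
      },
      workers: {
        numberOfWorkers: 10,
        image: "ghcr.io/example/dask:latest",
        // Per the comment above: set limits only; the plugin mirrors them into
        // requests and derives --memory-limit / --nthreads for the workers.
        resources: {
          limits: [
            { name: "CPU", value: "2" },
            { name: "MEMORY", value: "8Gi" },
          ],
        },
      },
    };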
@@ -1,6 +1,7 @@
 syntax = "proto3";
 
 package flyteidl.plugins;
+import "google/protobuf/struct.proto";
 
 option go_package = "github.com/flyteorg/flyteidl/gen/pb-go/flyteidl/plugins";
 
@@ -21,4 +22,13 @@ message SparkJob {
     map<string, string> sparkConf = 4;
     map<string, string> hadoopConf = 5;
     string executorPath = 6; // Executor path for Python jobs.
+    // Databricks job configuration.
+    // The config structure can be found at https://docs.databricks.com/dev-tools/api/2.0/jobs.html#request-structure.
+    google.protobuf.Struct databricksConf = 7;
+    // Databricks access token: https://docs.databricks.com/dev-tools/api/latest/authentication.html
+    // This token can be set in either flytepropeller or flytekit.
+    string databricksToken = 8;
+    // Domain name of your deployment. Use the form <account>.cloud.databricks.com.
+    // This instance name can be set in either flytepropeller or flytekit.
+    string databricksInstance = 9;
 }
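With databricksConf modeled as a google.protobuf.Struct, the job configuration travels as structured JSON rather than the base64 string the generated docs row describes. A short TypeScript sketch of the three new fields in JSON form; the config values and instance name are placeholders, not defaults from the package:

    // SparkJob's new Databricks fields in JSON form; everything here is illustrative.
    const sparkJobDatabricks = {
      databricksConf: {
        run_name: "flyte-spark-task",
        new_cluster: { spark_version: "11.0.x-scala2.12", num_workers: 4 },
      },
      // Real deployments would source the token from flytepropeller/flytekit
      // configuration or a secret store, not a literal in code.
      databricksToken: process.env.DATABRICKS_TOKEN ?? "",
      databricksInstance: "myaccount.cloud.databricks.com", // placeholder instance name
    };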