dagster-spark 0.26.6__py3-none-any.whl → 0.26.7__py3-none-any.whl

This diff shows the changes between two publicly available versions of the package as released to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.

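The hunks below add a large number of new nested fields to spark_config(), mirroring Spark properties that did not previously appear in the schema (for example spark.driver.memoryOverheadFactor, spark.executor.decommission.*, and spark.eventLog.rolling.*). As a rough illustration only, here is a minimal sketch of what a nested spark_conf value using some of these new keys might look like, assuming spark_config() nests everything under a top-level "spark" key so that key paths mirror the dotted Spark property names; the flatten helper is illustrative and is not part of the dagster-spark API.

    # Minimal sketch (not taken from the diff): a nested spark_conf dict using
    # keys added in 0.26.7. Key paths are assumed to mirror the dotted Spark
    # property names, e.g. spark.driver.memoryOverheadFactor.
    spark_conf = {
        "spark": {
            "driver": {
                # new in 0.26.7: fraction of driver memory added as non-heap overhead
                "memoryOverheadFactor": "0.20",
                "log": {"persistToDfs": {"enabled": "true"}},
            },
            "executor": {
                # new in 0.26.7: graceful decommission tuning
                "decommission": {"killInterval": "120s"},
            },
            "eventLog": {"rolling": {"enabled": "true", "maxFileSize": "128m"}},
        }
    }


    def flatten(conf, prefix=""):
        """Illustrative helper (not from dagster-spark): recover the dotted
        spark-submit --conf keys from the nested dict."""
        for key, value in conf.items():
            path = f"{prefix}.{key}" if prefix else key
            if isinstance(value, dict):
                yield from flatten(value, path)
            else:
                yield path, value


    print(dict(flatten(spark_conf)))
    # {'spark.driver.memoryOverheadFactor': '0.20',
    #  'spark.driver.log.persistToDfs.enabled': 'true',
    #  'spark.executor.decommission.killInterval': '120s',
    #  'spark.eventLog.rolling.enabled': 'true',
    #  'spark.eventLog.rolling.maxFileSize': '128m'}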

@@ -48,22 +48,91 @@ def spark_config():
48
48
  ),
49
49
  "memoryOverhead": Field(
50
50
  StringSource,
51
- description="""Application Properties: The amount of off-heap memory to be allocated per driver in cluster mode, in MiB unless otherwise specified. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the container size (typically 6-10%). This option is currently supported on YARN and Kubernetes.""",
51
+ description="""Application Properties: Amount of non-heap memory to be allocated per driver process in cluster mode, in MiB unless otherwise specified. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the container size (typically 6-10%). This option is currently supported on YARN, Mesos and Kubernetes. Note: Non-heap memory includes off-heap memory (when spark.memory.offHeap.enabled=true) and memory used by other driver processes (e.g. python process that goes with a PySpark driver) and memory used by other non-driver processes running in the same container. The maximum memory size of container to running driver is determined by the sum of spark.driver.memoryOverhead and spark.driver.memory.""",
52
52
  is_required=False,
53
53
  ),
54
+ "memoryOverheadFactor": Field(
55
+ StringSource,
56
+ description="""Application Properties: Fraction of driver memory to be allocated as additional non-heap memory per driver process in cluster mode. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the container size. This value defaults to 0.10 except for Kubernetes non-JVM jobs, which defaults to 0.40. This is done as non-JVM tasks need more non-JVM heap space and such tasks commonly fail with "Memory Overhead Exceeded" errors. This preempts this error with a higher default. This value is ignored if spark.driver.memoryOverhead is set directly.""",
57
+ is_required=False,
58
+ ),
59
+ "resource": Field(
60
+ Permissive(
61
+ fields={
62
+ "{resourceName}": Field(
63
+ Permissive(
64
+ fields={
65
+ "amount": Field(
66
+ StringSource,
67
+ description="""Application Properties: Amount of a particular resource type to use on the driver. If this is used, you must also specify the spark.driver.resource.{resourceName}.discoveryScript for the driver to find the resource on startup.""",
68
+ is_required=False,
69
+ ),
70
+ "discoveryScript": Field(
71
+ StringSource,
72
+ description="""Application Properties: A script for the driver to run to discover a particular resource type. This should write to STDOUT a JSON string in the format of the ResourceInformation class. This has a name and an array of addresses. For a client-submitted driver, discovery script must assign different resource addresses to this driver comparing to other drivers on the same host.""",
73
+ is_required=False,
74
+ ),
75
+ "vendor": Field(
76
+ StringSource,
77
+ description="""Application Properties: Vendor of the resources to use for the driver. This option is currently only supported on Kubernetes and is actually both the vendor and domain following the Kubernetes device plugin naming convention. (e.g. For GPUs on Kubernetes this config would be set to nvidia.com or amd.com)""",
78
+ is_required=False,
79
+ ),
80
+ }
81
+ )
82
+ ),
83
+ }
84
+ )
85
+ ),
54
86
  "supervise": Field(
55
87
  Bool,
56
88
  description="""Application Properties: If true, restarts the driver automatically if it fails with a non-zero exit status. Only has effect in Spark standalone mode or Mesos cluster deploy mode.""",
57
89
  is_required=False,
58
90
  ),
91
+ "log": Field(
92
+ Permissive(
93
+ fields={
94
+ "dfsDir": Field(
95
+ StringSource,
96
+ description="""Application Properties: Base directory in which Spark driver logs are synced, if spark.driver.log.persistToDfs.enabled is true. Within this base directory, each application logs the driver logs to an application specific file. Users may want to set this to a unified location like an HDFS directory so driver log files can be persisted for later usage. This directory should allow any Spark user to read/write files and the Spark History Server user to delete files. Additionally, older logs from this directory are cleaned by the Spark History Server if spark.history.fs.driverlog.cleaner.enabled is true and, if they are older than max age configured by setting spark.history.fs.driverlog.cleaner.maxAge.""",
97
+ is_required=False,
98
+ ),
99
+ "persistToDfs": Field(
100
+ Permissive(
101
+ fields={
102
+ "enabled": Field(
103
+ StringSource,
104
+ description="""Application Properties: If true, spark application running in client mode will write driver logs to a persistent storage, configured in spark.driver.log.dfsDir. If spark.driver.log.dfsDir is not configured, driver logs will not be persisted. Additionally, enable the cleaner by setting spark.history.fs.driverlog.cleaner.enabled to true in Spark History Server.""",
105
+ is_required=False,
106
+ ),
107
+ }
108
+ )
109
+ ),
110
+ "layout": Field(
111
+ StringSource,
112
+ description="""Application Properties: The layout for the driver logs that are synced to spark.driver.log.dfsDir. If this is not configured, it uses the layout for the first appender defined in log4j2.properties. If that is also not configured, driver logs use the default layout.""",
113
+ is_required=False,
114
+ ),
115
+ "allowErasureCoding": Field(
116
+ StringSource,
117
+ description="""Application Properties: Whether to allow driver logs to use erasure coding. On HDFS, erasure coded files will not update as quickly as regular replicated files, so they may take longer to reflect changes written by the application. Note that even if this is true, Spark will still not force the file to use erasure coding, it will simply use file system defaults.""",
118
+ is_required=False,
119
+ ),
120
+ }
121
+ )
122
+ ),
59
123
  "extraClassPath": Field(
60
124
  StringSource,
61
125
  description="""Runtime Environment: Extra classpath entries to prepend to the classpath of the driver. Note: In client mode, this config must not be set through the SparkConf directly in your application, because the driver JVM has already started at that point. Instead, please set this through the --driver-class-path command line option or in your default properties file.""",
62
126
  is_required=False,
63
127
  ),
128
+ "defaultJavaOptions": Field(
129
+ StringSource,
130
+ description="""Runtime Environment: A string of default JVM options to prepend to spark.driver.extraJavaOptions. This is intended to be set by administrators. For instance, GC settings or other logging. Note that it is illegal to set maximum heap size (-Xmx) settings with this option. Maximum heap size settings can be set with spark.driver.memory in the cluster mode and through the --driver-memory command line option in the client mode. Note: In client mode, this config must not be set through the SparkConf directly in your application, because the driver JVM has already started at that point. Instead, please set this through the --driver-java-options command line option or in your default properties file.""",
131
+ is_required=False,
132
+ ),
64
133
  "extraJavaOptions": Field(
65
134
  StringSource,
66
- description="""Runtime Environment: A string of extra JVM options to pass to the driver. For instance, GC settings or other logging. Note that it is illegal to set maximum heap size (-Xmx) settings with this option. Maximum heap size settings can be set with spark.driver.memory in the cluster mode and through the --driver-memory command line option in the client mode. Note: In client mode, this config must not be set through the SparkConf directly in your application, because the driver JVM has already started at that point. Instead, please set this through the --driver-java-options command line option or in your default properties file.""",
135
+ description="""Runtime Environment: A string of extra JVM options to pass to the driver. This is intended to be set by users. For instance, GC settings or other logging. Note that it is illegal to set maximum heap size (-Xmx) settings with this option. Maximum heap size settings can be set with spark.driver.memory in the cluster mode and through the --driver-memory command line option in the client mode. Note: In client mode, this config must not be set through the SparkConf directly in your application, because the driver JVM has already started at that point. Instead, please set this through the --driver-java-options command line option or in your default properties file. spark.driver.defaultJavaOptions will be prepended to this configuration.""",
67
136
  is_required=False,
68
137
  ),
69
138
  "extraLibraryPath": Field(
@@ -105,6 +174,17 @@ def spark_config():
105
174
  }
106
175
  )
107
176
  ),
177
+ "resources": Field(
178
+ Permissive(
179
+ fields={
180
+ "discoveryPlugin": Field(
181
+ StringSource,
182
+ description="""Application Properties: Comma-separated list of class names implementing org.apache.spark.api.resource.ResourceDiscoveryPlugin to load into the application. This is for advanced users to replace the resource discovery class with a custom implementation. Spark will try each class specified until one of them returns the resource information for that resource. It tries the discovery script last if none of the plugins return information for that resource.""",
183
+ is_required=False,
184
+ ),
185
+ }
186
+ )
187
+ ),
108
188
  "executor": Field(
109
189
  Permissive(
110
190
  fields={
@@ -118,7 +198,7 @@ def spark_config():
118
198
  fields={
119
199
  "memory": Field(
120
200
  StringSource,
121
- description="""Application Properties: The amount of memory to be allocated to PySpark in each executor, in MiB unless otherwise specified. If set, PySpark memory for an executor will be limited to this amount. If not set, Spark will not limit Python's memory use and it is up to the application to avoid exceeding the overhead memory space shared with other non-JVM processes. When PySpark is run in YARN or Kubernetes, this memory is added to executor resource requests.""",
201
+ description="""Application Properties: The amount of memory to be allocated to PySpark in each executor, in MiB unless otherwise specified. If set, PySpark memory for an executor will be limited to this amount. If not set, Spark will not limit Python's memory use and it is up to the application to avoid exceeding the overhead memory space shared with other non-JVM processes. When PySpark is run in YARN or Kubernetes, this memory is added to executor resource requests. Note: This feature is dependent on Python's `resource` module; therefore, the behaviors and limitations are inherited. For instance, Windows does not support resource limiting and actual resource is not limited on MacOS.""",
122
202
  is_required=False,
123
203
  ),
124
204
  }
@@ -126,7 +206,70 @@ def spark_config():
126
206
  ),
127
207
  "memoryOverhead": Field(
128
208
  StringSource,
129
- description="""Application Properties: The amount of off-heap memory to be allocated per executor, in MiB unless otherwise specified. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the executor size (typically 6-10%). This option is currently supported on YARN and Kubernetes.""",
209
+ description="""Application Properties: Amount of additional memory to be allocated per executor process, in MiB unless otherwise specified. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the executor size (typically 6-10%). This option is currently supported on YARN and Kubernetes. Note: Additional memory includes PySpark executor memory (when spark.executor.pyspark.memory is not configured) and memory used by other non-executor processes running in the same container. The maximum memory size of container to running executor is determined by the sum of spark.executor.memoryOverhead, spark.executor.memory, spark.memory.offHeap.size and spark.executor.pyspark.memory.""",
210
+ is_required=False,
211
+ ),
212
+ "memoryOverheadFactor": Field(
213
+ StringSource,
214
+ description="""Application Properties: Fraction of executor memory to be allocated as additional non-heap memory per executor process. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the container size. This value defaults to 0.10 except for Kubernetes non-JVM jobs, which defaults to 0.40. This is done as non-JVM tasks need more non-JVM heap space and such tasks commonly fail with "Memory Overhead Exceeded" errors. This preempts this error with a higher default. This value is ignored if spark.executor.memoryOverhead is set directly.""",
215
+ is_required=False,
216
+ ),
217
+ "resource": Field(
218
+ Permissive(
219
+ fields={
220
+ "{resourceName}": Field(
221
+ Permissive(
222
+ fields={
223
+ "amount": Field(
224
+ StringSource,
225
+ description="""Application Properties: Amount of a particular resource type to use per executor process. If this is used, you must also specify the spark.executor.resource.{resourceName}.discoveryScript for the executor to find the resource on startup.""",
226
+ is_required=False,
227
+ ),
228
+ "discoveryScript": Field(
229
+ StringSource,
230
+ description="""Application Properties: A script for the executor to run to discover a particular resource type. This should write to STDOUT a JSON string in the format of the ResourceInformation class. This has a name and an array of addresses.""",
231
+ is_required=False,
232
+ ),
233
+ "vendor": Field(
234
+ StringSource,
235
+ description="""Application Properties: Vendor of the resources to use for the executors. This option is currently only supported on Kubernetes and is actually both the vendor and domain following the Kubernetes device plugin naming convention. (e.g. For GPUs on Kubernetes this config would be set to nvidia.com or amd.com)""",
236
+ is_required=False,
237
+ ),
238
+ }
239
+ )
240
+ ),
241
+ }
242
+ )
243
+ ),
244
+ "decommission": Field(
245
+ Permissive(
246
+ fields={
247
+ "killInterval": Field(
248
+ StringSource,
249
+ description="""Application Properties: Duration after which a decommissioned executor will be killed forcefully by an outside (e.g. non-spark) service.""",
250
+ is_required=False,
251
+ ),
252
+ "forceKillTimeout": Field(
253
+ StringSource,
254
+ description="""Application Properties: Duration after which Spark will force a decommissioning executor to exit. This should be set to a high value in most situations as low values will prevent block migrations from having enough time to complete.""",
255
+ is_required=False,
256
+ ),
257
+ "signal": Field(
258
+ StringSource,
259
+ description="""Application Properties: The signal used to trigger the executor to start decommissioning.""",
260
+ is_required=False,
261
+ ),
262
+ }
263
+ )
264
+ ),
265
+ "maxNumFailures": Field(
266
+ StringSource,
267
+ description="""Application Properties: The maximum number of executor failures before failing the application. This configuration only takes effect on YARN, or Kubernetes when `spark.kubernetes.allocation.pods.allocator` is set to 'direct'.""",
268
+ is_required=False,
269
+ ),
270
+ "failuresValidityInterval": Field(
271
+ StringSource,
272
+ description="""Application Properties: Interval after which executor failures will be considered independent and not accumulate towards the attempt count. This configuration only takes effect on YARN, or Kubernetes when `spark.kubernetes.allocation.pods.allocator` is set to 'direct'.""",
130
273
  is_required=False,
131
274
  ),
132
275
  "extraClassPath": Field(
@@ -134,9 +277,14 @@ def spark_config():
134
277
  description="""Runtime Environment: Extra classpath entries to prepend to the classpath of executors. This exists primarily for backwards-compatibility with older versions of Spark. Users typically should not need to set this option.""",
135
278
  is_required=False,
136
279
  ),
280
+ "defaultJavaOptions": Field(
281
+ StringSource,
282
+ description="""Runtime Environment: A string of default JVM options to prepend to spark.executor.extraJavaOptions. This is intended to be set by administrators. For instance, GC settings or other logging. Note that it is illegal to set Spark properties or maximum heap size (-Xmx) settings with this option. Spark properties should be set using a SparkConf object or the spark-defaults.conf file used with the spark-submit script. Maximum heap size settings can be set with spark.executor.memory. The following symbols, if present will be interpolated: {{APP_ID}} will be replaced by application ID and {{EXECUTOR_ID}} will be replaced by executor ID. For example, to enable verbose gc logging to a file named for the executor ID of the app in /tmp, pass a 'value' of: -verbose:gc -Xloggc:/tmp/{{APP_ID}}-{{EXECUTOR_ID}}.gc""",
283
+ is_required=False,
284
+ ),
137
285
  "extraJavaOptions": Field(
138
286
  StringSource,
139
- description="""Runtime Environment: A string of extra JVM options to pass to executors. For instance, GC settings or other logging. Note that it is illegal to set Spark properties or maximum heap size (-Xmx) settings with this option. Spark properties should be set using a SparkConf object or the spark-defaults.conf file used with the spark-submit script. Maximum heap size settings can be set with spark.executor.memory. The following symbols, if present will be interpolated: {{APP_ID}} will be replaced by application ID and {{EXECUTOR_ID}} will be replaced by executor ID. For example, to enable verbose gc logging to a file named for the executor ID of the app in /tmp, pass a 'value' of: -verbose:gc -Xloggc:/tmp/{{APP_ID}}-{{EXECUTOR_ID}}.gc""",
287
+ description="""Runtime Environment: A string of extra JVM options to pass to executors. This is intended to be set by users. For instance, GC settings or other logging. Note that it is illegal to set Spark properties or maximum heap size (-Xmx) settings with this option. Spark properties should be set using a SparkConf object or the spark-defaults.conf file used with the spark-submit script. Maximum heap size settings can be set with spark.executor.memory. The following symbols, if present will be interpolated: {{APP_ID}} will be replaced by application ID and {{EXECUTOR_ID}} will be replaced by executor ID. For example, to enable verbose gc logging to a file named for the executor ID of the app in /tmp, pass a 'value' of: -verbose:gc -Xloggc:/tmp/{{APP_ID}}-{{EXECUTOR_ID}}.gc spark.executor.defaultJavaOptions will be prepended to this configuration.""",
140
288
  is_required=False,
141
289
  ),
142
290
  "extraLibraryPath": Field(
@@ -202,6 +350,33 @@ def spark_config():
202
350
  description="""Execution Behavior: Interval between each executor's heartbeats to the driver. Heartbeats let the driver know that the executor is still alive and update it with metrics for in-progress tasks. spark.executor.heartbeatInterval should be significantly less than spark.network.timeout""",
203
351
  is_required=False,
204
352
  ),
353
+ "processTreeMetrics": Field(
354
+ Permissive(
355
+ fields={
356
+ "enabled": Field(
357
+ StringSource,
358
+ description="""Executor Metrics: Whether to collect process tree metrics (from the /proc filesystem) when collecting executor metrics. Note: The process tree metrics are collected only if the /proc filesystem exists.""",
359
+ is_required=False,
360
+ ),
361
+ }
362
+ )
363
+ ),
364
+ "metrics": Field(
365
+ Permissive(
366
+ fields={
367
+ "pollingInterval": Field(
368
+ StringSource,
369
+ description="""Executor Metrics: How often to collect executor metrics (in milliseconds). If 0, the polling is done on executor heartbeats (thus at the heartbeat interval, specified by spark.executor.heartbeatInterval). If positive, the polling is done at this interval.""",
370
+ is_required=False,
371
+ ),
372
+ "fileSystemSchemes": Field(
373
+ StringSource,
374
+ description="""Executor Metrics: The file system schemes to report in executor metrics.""",
375
+ is_required=False,
376
+ ),
377
+ }
378
+ )
379
+ ),
205
380
  }
206
381
  )
207
382
  ),
@@ -215,7 +390,7 @@ def spark_config():
215
390
  fields={
216
391
  "dir": Field(
217
392
  StringSource,
218
- description="""Application Properties: Directory to use for "scratch" space in Spark, including map output files and RDDs that get stored on disk. This should be on a fast, local disk in your system. It can also be a comma-separated list of multiple directories on different disks. NOTE: In Spark 1.0 and later this will be overridden by SPARK_LOCAL_DIRS (Standalone), MESOS_SANDBOX (Mesos) or LOCAL_DIRS (YARN) environment variables set by the cluster manager.""",
393
+ description="""Application Properties: Directory to use for "scratch" space in Spark, including map output files and RDDs that get stored on disk. This should be on a fast, local disk in your system. It can also be a comma-separated list of multiple directories on different disks. Note: This will be overridden by SPARK_LOCAL_DIRS (Standalone), MESOS_SANDBOX (Mesos) or LOCAL_DIRS (YARN) environment variables set by the cluster manager.""",
219
394
  is_required=False,
220
395
  ),
221
396
  }
@@ -255,6 +430,22 @@ def spark_config():
255
430
  description="""Application Properties: Application information that will be written into Yarn RM log/HDFS audit log when running on Yarn/HDFS. Its length depends on the Hadoop configuration hadoop.caller.context.max.size. It should be concise, and typically can have up to 50 characters.""",
256
431
  is_required=False,
257
432
  ),
433
+ "level": Field(
434
+ StringSource,
435
+ description="""Application Properties: When set, overrides any user-defined log settings as if calling SparkContext.setLogLevel() at Spark startup. Valid log levels include: "ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN".""",
436
+ is_required=False,
437
+ ),
438
+ }
439
+ )
440
+ ),
441
+ "decommission": Field(
442
+ Permissive(
443
+ fields={
444
+ "enabled": Field(
445
+ StringSource,
446
+ description="""Application Properties: When decommission enabled, Spark will try its best to shut down the executor gracefully. Spark will try to migrate all the RDD blocks (controlled by spark.storage.decommission.rddBlocks.enabled) and shuffle blocks (controlled by spark.storage.decommission.shuffleBlocks.enabled) from the decommissioning executor to a remote executor when spark.storage.decommission.enabled is enabled. With decommission enabled, Spark will also decommission an executor instead of killing when spark.dynamicAllocation.enabled enabled.""",
447
+ is_required=False,
448
+ ),
258
449
  }
259
450
  )
260
451
  ),
@@ -266,6 +457,17 @@ def spark_config():
266
457
  description="""Runtime Environment: Regex to decide which Spark configuration properties and environment variables in driver and executor environments contain sensitive information. When this regex matches a property key or value, the value is redacted from the environment UI and various logs like YARN and event logs.""",
267
458
  is_required=False,
268
459
  ),
460
+ "string": Field(
461
+ Permissive(
462
+ fields={
463
+ "regex": Field(
464
+ StringSource,
465
+ description="""Runtime Environment: Regex to decide which parts of strings produced by Spark contain sensitive information. When this regex matches a string part, that string part is replaced by a dummy value. This is currently used to redact the output of SQL explain commands.""",
466
+ is_required=False,
467
+ ),
468
+ }
469
+ )
470
+ ),
269
471
  }
270
472
  )
271
473
  ),
@@ -282,7 +484,7 @@ def spark_config():
282
484
  ),
283
485
  "dump": Field(
284
486
  StringSource,
285
- description="""Runtime Environment: The directory which is used to dump the profile result before driver exiting. The results will be dumped as separated file for each RDD. They can be loaded by ptats.Stats(). If this is specified, the profile result will not be displayed automatically.""",
487
+ description="""Runtime Environment: The directory which is used to dump the profile result before driver exiting. The results will be dumped as separated file for each RDD. They can be loaded by pstats.Stats(). If this is specified, the profile result will not be displayed automatically.""",
286
488
  is_required=False,
287
489
  ),
288
490
  }
@@ -298,7 +500,7 @@ def spark_config():
298
500
  ),
299
501
  "reuse": Field(
300
502
  Bool,
301
- description="""Runtime Environment: Reuse Python worker or not. If yes, it will use a fixed number of Python workers, does not need to fork() a Python process for every task. It will be very useful if there is large broadcast, then the broadcast will not be needed to transferred from JVM to Python worker for every task.""",
503
+ description="""Runtime Environment: Reuse Python worker or not. If yes, it will use a fixed number of Python workers, does not need to fork() a Python process for every task. It will be very useful if there is a large broadcast, then the broadcast will not need to be transferred from JVM to Python worker for every task.""",
302
504
  is_required=False,
303
505
  ),
304
506
  }
@@ -315,6 +517,22 @@ def spark_config():
315
517
  description="""Runtime Environment: Comma-separated list of files to be placed in the working directory of each executor. Globs are allowed.""",
316
518
  is_required=False,
317
519
  ),
520
+ "io": Field(
521
+ Permissive(
522
+ fields={
523
+ "connectionTimeout": Field(
524
+ StringSource,
525
+ description="""Shuffle Behavior: Timeout for the established connections for fetching files in Spark RPC environments to be marked as idled and closed if there are still outstanding files being downloaded but no traffic on the channel for at least `connectionTimeout`.""",
526
+ is_required=False,
527
+ ),
528
+ "connectionCreationTimeout": Field(
529
+ StringSource,
530
+ description="""Shuffle Behavior: Timeout for establishing a connection for fetching files in Spark RPC environments.""",
531
+ is_required=False,
532
+ ),
533
+ }
534
+ )
535
+ ),
318
536
  "fetchTimeout": Field(
319
537
  StringSource,
320
538
  description="""Execution Behavior: Communication timeout to use when fetching files added through SparkContext.addFile() from the driver.""",
@@ -327,7 +545,17 @@ def spark_config():
327
545
  ),
328
546
  "overwrite": Field(
329
547
  Bool,
330
- description="""Execution Behavior: Whether to overwrite files added through SparkContext.addFile() when the target file exists and its contents do not match those of the source.""",
548
+ description="""Execution Behavior: Whether to overwrite any files which exist at the startup. Users can not overwrite the files added by SparkContext.addFile or SparkContext.addJar before even if this option is set true.""",
549
+ is_required=False,
550
+ ),
551
+ "ignoreCorruptFiles": Field(
552
+ StringSource,
553
+ description="""Execution Behavior: Whether to ignore corrupt files. If true, the Spark jobs will continue to run when encountering corrupted or non-existing files and contents that have been read will still be returned.""",
554
+ is_required=False,
555
+ ),
556
+ "ignoreMissingFiles": Field(
557
+ StringSource,
558
+ description="""Execution Behavior: Whether to ignore missing files. If true, the Spark jobs will continue to run when encountering missing files and the contents that have been read will still be returned.""",
331
559
  is_required=False,
332
560
  ),
333
561
  "maxPartitionBytes": Field(
@@ -368,7 +596,7 @@ def spark_config():
368
596
  ),
369
597
  "ivySettings": Field(
370
598
  StringSource,
371
- description="""Runtime Environment: Path to an Ivy settings file to customize resolution of jars specified using spark.jars.packages instead of the built-in defaults, such as maven central. Additional repositories given by the command-line option --repositories or spark.jars.repositories will also be included. Useful for allowing Spark to resolve artifacts from behind a firewall e.g. via an in-house artifact server like Artifactory. Details on the settings file format can be found at http://ant.apache.org/ivy/history/latest-milestone/settings.html""",
599
+ description="""Runtime Environment: Path to an Ivy settings file to customize resolution of jars specified using spark.jars.packages instead of the built-in defaults, such as maven central. Additional repositories given by the command-line option --repositories or spark.jars.repositories will also be included. Useful for allowing Spark to resolve artifacts from behind a firewall e.g. via an in-house artifact server like Artifactory. Details on the settings file format can be found at Settings Files. Only paths with file:// scheme are supported. Paths without a scheme are assumed to have a file:// scheme. When running in YARN cluster mode, this file will also be localized to the remote driver for dependency resolution within SparkContext#addJar""",
372
600
  is_required=False,
373
601
  ),
374
602
  "repositories": Field(
@@ -379,6 +607,11 @@ def spark_config():
379
607
  }
380
608
  )
381
609
  ),
610
+ "archives": Field(
611
+ StringSource,
612
+ description="""Runtime Environment: Comma-separated list of archives to be extracted into the working directory of each executor. .jar, .tar.gz, .tgz and .zip are supported. You can specify the directory name to unpack via adding # after the file name to unpack, for example, file.zip#directory. This configuration is experimental.""",
613
+ is_required=False,
614
+ ),
382
615
  "pyspark": Field(
383
616
  Permissive(
384
617
  fields={
@@ -422,11 +655,6 @@ def spark_config():
422
655
  }
423
656
  )
424
657
  ),
425
- "maxRemoteBlockSizeFetchToMem": Field(
426
- IntSource,
427
- description="""Shuffle Behavior: The remote block will be fetched to disk when size of the block is above this threshold in bytes. This is to avoid a giant request that takes too much memory. By default, this is only enabled for blocks > 2GB, as those cannot be fetched directly into memory, no matter what resources are available. But it can be turned down to a much lower value (eg. 200m) to avoid using too much memory on smaller blocks as well. Note this configuration will affect both shuffle fetch and block manager remote block fetch. For users who enabled external shuffle service, this feature can only be used when external shuffle service is newer than Spark 2.2.""",
428
- is_required=False,
429
- ),
430
658
  "shuffle": Field(
431
659
  Permissive(
432
660
  fields={
@@ -446,6 +674,45 @@ def spark_config():
446
674
  }
447
675
  )
448
676
  ),
677
+ "unsafe": Field(
678
+ Permissive(
679
+ fields={
680
+ "file": Field(
681
+ Permissive(
682
+ fields={
683
+ "output": Field(
684
+ Permissive(
685
+ fields={
686
+ "buffer": Field(
687
+ StringSource,
688
+ description="""Shuffle Behavior: The file system for this buffer size after each partition is written in unsafe shuffle writer. In KiB unless otherwise specified.""",
689
+ is_required=False,
690
+ ),
691
+ }
692
+ )
693
+ ),
694
+ }
695
+ )
696
+ ),
697
+ }
698
+ )
699
+ ),
700
+ "spill": Field(
701
+ Permissive(
702
+ fields={
703
+ "diskWriteBufferSize": Field(
704
+ StringSource,
705
+ description="""Shuffle Behavior: The buffer size, in bytes, to use when writing the sorted records to an on-disk file.""",
706
+ is_required=False,
707
+ ),
708
+ "compress": Field(
709
+ Bool,
710
+ description="""Shuffle Behavior: Whether to compress data spilled during shuffles. Compression will use spark.io.compression.codec.""",
711
+ is_required=False,
712
+ ),
713
+ }
714
+ )
715
+ ),
449
716
  "io": Field(
450
717
  Permissive(
451
718
  fields={
@@ -469,6 +736,21 @@ def spark_config():
469
736
  description="""Shuffle Behavior: (Netty only) How long to wait between retries of fetches. The maximum delay caused by retrying is 15 seconds by default, calculated as maxRetries * retryWait.""",
470
737
  is_required=False,
471
738
  ),
739
+ "backLog": Field(
740
+ StringSource,
741
+ description="""Shuffle Behavior: Length of the accept queue for the shuffle service. For large applications, this value may need to be increased, so that incoming connections are not dropped if the service cannot keep up with a large number of connections arriving in a short period of time. This needs to be configured wherever the shuffle service itself is running, which may be outside of the application (see spark.shuffle.service.enabled option below). If set below 1, will fallback to OS default defined by Netty's io.netty.util.NetUtil#SOMAXCONN.""",
742
+ is_required=False,
743
+ ),
744
+ "connectionTimeout": Field(
745
+ StringSource,
746
+ description="""Shuffle Behavior: Timeout for the established connections between shuffle servers and clients to be marked as idled and closed if there are still outstanding fetch requests but no traffic on the channel for at least `connectionTimeout`.""",
747
+ is_required=False,
748
+ ),
749
+ "connectionCreationTimeout": Field(
750
+ StringSource,
751
+ description="""Shuffle Behavior: Timeout for establishing a connection between the shuffle servers and clients.""",
752
+ is_required=False,
753
+ ),
472
754
  }
473
755
  )
474
756
  ),
@@ -477,7 +759,7 @@ def spark_config():
477
759
  fields={
478
760
  "enabled": Field(
479
761
  Bool,
480
- description="""Shuffle Behavior: Enables the external shuffle service. This service preserves the shuffle files written by executors so the executors can be safely removed. This must be enabled if spark.dynamicAllocation.enabled is "true". The external shuffle service must be set up in order to enable it. See dynamic allocation configuration and setup documentation for more information.""",
762
+ description="""Shuffle Behavior: Enables the external shuffle service. This service preserves the shuffle files written by executors e.g. so that executors can be safely removed, or so that shuffle fetches can continue in the event of executor failure. The external shuffle service must be set up in order to enable it. See dynamic allocation configuration and setup documentation for more information.""",
481
763
  is_required=False,
482
764
  ),
483
765
  "port": Field(
@@ -485,6 +767,11 @@ def spark_config():
485
767
  description="""Shuffle Behavior: Port on which the external shuffle service will run.""",
486
768
  is_required=False,
487
769
  ),
770
+ "name": Field(
771
+ StringSource,
772
+ description="""Shuffle Behavior: The configured name of the Spark shuffle service the client should communicate with. This must match the name used to configure the Shuffle within the YARN NodeManager configuration (yarn.nodemanager.aux-services). Only takes effect when spark.shuffle.service.enabled is set to true.""",
773
+ is_required=False,
774
+ ),
488
775
  "index": Field(
489
776
  Permissive(
490
777
  fields={
@@ -493,7 +780,7 @@ def spark_config():
493
780
  fields={
494
781
  "size": Field(
495
782
  StringSource,
496
- description="""Shuffle Behavior: Cache entries limited to the specified memory footprint in bytes.""",
783
+ description="""Shuffle Behavior: Cache entries limited to the specified memory footprint, in bytes unless otherwise specified.""",
497
784
  is_required=False,
498
785
  ),
499
786
  }
@@ -502,6 +789,44 @@ def spark_config():
502
789
  }
503
790
  )
504
791
  ),
792
+ "removeShuffle": Field(
793
+ StringSource,
794
+ description="""Shuffle Behavior: Whether to use the ExternalShuffleService for deleting shuffle blocks for deallocated executors when the shuffle is no longer needed. Without this enabled, shuffle data on executors that are deallocated will remain on disk until the application ends.""",
795
+ is_required=False,
796
+ ),
797
+ "fetch": Field(
798
+ Permissive(
799
+ fields={
800
+ "rdd": Field(
801
+ Permissive(
802
+ fields={
803
+ "enabled": Field(
804
+ StringSource,
805
+ description="""Shuffle Behavior: Whether to use the ExternalShuffleService for fetching disk persisted RDD blocks. In case of dynamic allocation if this feature is enabled executors having only disk persisted blocks are considered idle after spark.dynamicAllocation.executorIdleTimeout and will be released accordingly.""",
806
+ is_required=False,
807
+ ),
808
+ }
809
+ )
810
+ ),
811
+ }
812
+ )
813
+ ),
814
+ "db": Field(
815
+ Permissive(
816
+ fields={
817
+ "enabled": Field(
818
+ StringSource,
819
+ description="""Shuffle Behavior: Whether to use db in ExternalShuffleService. Note that this only affects standalone mode.""",
820
+ is_required=False,
821
+ ),
822
+ "backend": Field(
823
+ StringSource,
824
+ description="""Shuffle Behavior: Specifies a disk-based store used in shuffle service local db. Setting as LEVELDB or ROCKSDB.""",
825
+ is_required=False,
826
+ ),
827
+ }
828
+ )
829
+ ),
505
830
  }
506
831
  )
507
832
  ),
@@ -518,16 +843,22 @@ def spark_config():
518
843
  description="""Shuffle Behavior: (Advanced) In the sort-based shuffle manager, avoid merge-sorting data if there is no map-side aggregation and there are at most this many reduce partitions.""",
519
844
  is_required=False,
520
845
  ),
521
- }
522
- )
523
- ),
524
- "spill": Field(
525
- Permissive(
526
- fields={
527
- "compress": Field(
528
- Bool,
529
- description="""Shuffle Behavior: Whether to compress data spilled during shuffles. Compression will use spark.io.compression.codec.""",
530
- is_required=False,
846
+ "io": Field(
847
+ Permissive(
848
+ fields={
849
+ "plugin": Field(
850
+ Permissive(
851
+ fields={
852
+ "class": Field(
853
+ StringSource,
854
+ description="""Shuffle Behavior: Name of the class to use for shuffle IO.""",
855
+ is_required=False,
856
+ ),
857
+ }
858
+ )
859
+ ),
860
+ }
861
+ )
531
862
  ),
532
863
  }
533
864
  )
@@ -553,11 +884,70 @@ def spark_config():
553
884
  }
554
885
  )
555
886
  ),
556
- "memoryFraction": Field(
557
- Float,
558
- description="""Memory Management: (deprecated) This is read only if spark.memory.useLegacyMode is enabled. Fraction of Java heap to use for aggregation and cogroups during shuffles. At any given time, the collective size of all in-memory maps used for shuffles is bounded by this limit, beyond which the contents will begin to spill to disk. If spills are often, consider increasing this value at the expense of spark.storage.memoryFraction.""",
887
+ "reduceLocality": Field(
888
+ Permissive(
889
+ fields={
890
+ "enabled": Field(
891
+ StringSource,
892
+ description="""Shuffle Behavior: Whether to compute locality preferences for reduce tasks.""",
893
+ is_required=False,
894
+ ),
895
+ }
896
+ )
897
+ ),
898
+ "mapOutput": Field(
899
+ Permissive(
900
+ fields={
901
+ "minSizeForBroadcast": Field(
902
+ StringSource,
903
+ description="""Shuffle Behavior: The size at which we use Broadcast to send the map output statuses to the executors.""",
904
+ is_required=False,
905
+ ),
906
+ }
907
+ )
908
+ ),
909
+ "detectCorrupt": Field(
910
+ Permissive(
911
+ fields={
912
+ "root": Field(
913
+ StringSource,
914
+ description="""Shuffle Behavior: Whether to detect any corruption in fetched blocks.""",
915
+ is_required=False,
916
+ ),
917
+ "useExtraMemory": Field(
918
+ StringSource,
919
+ description="""Shuffle Behavior: If enabled, part of a compressed/encrypted stream will be de-compressed/de-crypted by using extra memory to detect early corruption. Any IOException thrown will cause the task to be retried once and if it fails again with same exception, then FetchFailedException will be thrown to retry previous stage.""",
920
+ is_required=False,
921
+ ),
922
+ }
923
+ )
924
+ ),
925
+ "useOldFetchProtocol": Field(
926
+ StringSource,
927
+ description="""Shuffle Behavior: Whether to use the old protocol while doing the shuffle block fetching. It is only enabled while we need the compatibility in the scenario of new Spark version job fetching shuffle blocks from old version external shuffle service.""",
559
928
  is_required=False,
560
929
  ),
930
+ "readHostLocalDisk": Field(
931
+ StringSource,
932
+ description="""Shuffle Behavior: If enabled (and spark.shuffle.useOldFetchProtocol is disabled), shuffle blocks requested from those block managers which are running on the same host are read from the disk directly instead of being fetched as remote blocks over the network.""",
933
+ is_required=False,
934
+ ),
935
+ "checksum": Field(
936
+ Permissive(
937
+ fields={
938
+ "enabled": Field(
939
+ StringSource,
940
+ description="""Shuffle Behavior: Whether to calculate the checksum of shuffle data. If enabled, Spark will calculate the checksum values for each partition data within the map output file and store the values in a checksum file on the disk. When there's shuffle data corruption detected, Spark will try to diagnose the cause (e.g., network issue, disk issue, etc.) of the corruption by using the checksum file.""",
941
+ is_required=False,
942
+ ),
943
+ "algorithm": Field(
944
+ StringSource,
945
+ description="""Shuffle Behavior: The algorithm is used to calculate the shuffle checksum. Currently, it only supports built-in algorithms of JDK, e.g., ADLER32, CRC32.""",
946
+ is_required=False,
947
+ ),
948
+ }
949
+ )
950
+ ),
561
951
  }
562
952
  )
563
953
  ),
@@ -588,9 +978,31 @@ def spark_config():
588
978
  ),
589
979
  "compress": Field(
590
980
  StringSource,
591
- description="""Spark UI: Whether to compress logged events, if spark.eventLog.enabled is true. Compression will use spark.io.compression.codec.""",
981
+ description="""Spark UI: Whether to compress logged events, if spark.eventLog.enabled is true.""",
592
982
  is_required=False,
593
983
  ),
984
+ "compression": Field(
985
+ Permissive(
986
+ fields={
987
+ "codec": Field(
988
+ StringSource,
989
+ description="""Spark UI: The codec to compress logged events. By default, Spark provides four codecs: lz4, lzf, snappy, and zstd. You can also use fully qualified class names to specify the codec, e.g. org.apache.spark.io.LZ4CompressionCodec, org.apache.spark.io.LZFCompressionCodec, org.apache.spark.io.SnappyCompressionCodec, and org.apache.spark.io.ZStdCompressionCodec.""",
990
+ is_required=False,
991
+ ),
992
+ }
993
+ )
994
+ ),
995
+ "erasureCoding": Field(
996
+ Permissive(
997
+ fields={
998
+ "enabled": Field(
999
+ StringSource,
1000
+ description="""Spark UI: Whether to allow event logs to use erasure coding, or turn erasure coding off, regardless of filesystem defaults. On HDFS, erasure coded files will not update as quickly as regular replicated files, so the application updates will take longer to appear in the History Server. Note that even if this is true, Spark will still not force the file to use erasure coding, it will simply use filesystem defaults.""",
1001
+ is_required=False,
1002
+ ),
1003
+ }
1004
+ )
1005
+ ),
594
1006
  "dir": Field(
595
1007
  StringSource,
596
1008
  description="""Spark UI: Base directory in which Spark events are logged, if spark.eventLog.enabled is true. Within this base directory, Spark creates a sub-directory for each application, and logs the events specific to the application in this directory. Users may want to set this to a unified location like an HDFS directory so history files can be read by the history server.""",
@@ -617,6 +1029,43 @@ def spark_config():
617
1029
  }
618
1030
  )
619
1031
  ),
1032
+ "rolling": Field(
1033
+ Permissive(
1034
+ fields={
1035
+ "enabled": Field(
1036
+ StringSource,
1037
+ description="""Spark UI: Whether rolling over event log files is enabled. If set to true, it cuts down each event log file to the configured size.""",
1038
+ is_required=False,
1039
+ ),
1040
+ "maxFileSize": Field(
1041
+ StringSource,
1042
+ description="""Spark UI: When spark.eventLog.rolling.enabled=true, specifies the max size of event log file before it's rolled over.""",
1043
+ is_required=False,
1044
+ ),
1045
+ }
1046
+ )
1047
+ ),
1048
+ "logStageExecutorMetrics": Field(
1049
+ StringSource,
1050
+ description="""Executor Metrics: Whether to write per-stage peaks of executor metrics (for each executor) to the event log. Note: The metrics are polled (collected) and sent in the executor heartbeat, and this is always done; this configuration is only to determine if aggregated metric peaks are written to the event log.""",
1051
+ is_required=False,
1052
+ ),
1053
+ "gcMetrics": Field(
1054
+ Permissive(
1055
+ fields={
1056
+ "youngGenerationGarbageCollectors": Field(
1057
+ StringSource,
1058
+ description="""Executor Metrics: Names of supported young generation garbage collector. A name usually is the return of GarbageCollectorMXBean.getName. The built-in young generation garbage collectors are Copy,PS Scavenge,ParNew,G1 Young Generation.""",
1059
+ is_required=False,
1060
+ ),
1061
+ "oldGenerationGarbageCollectors": Field(
1062
+ StringSource,
1063
+ description="""Executor Metrics: Names of supported old generation garbage collector. A name usually is the return of GarbageCollectorMXBean.getName. The built-in old generation garbage collectors are MarkSweepCompact,PS MarkSweep,ConcurrentMarkSweep,G1 Old Generation.""",
1064
+ is_required=False,
1065
+ ),
1066
+ }
1067
+ )
1068
+ ),
620
1069
  }
621
1070
  )
622
1071
  ),
@@ -639,6 +1088,17 @@ def spark_config():
639
1088
  description="""Spark UI: Whether to run the web UI for the Spark application.""",
640
1089
  is_required=False,
641
1090
  ),
1091
+ "store": Field(
1092
+ Permissive(
1093
+ fields={
1094
+ "path": Field(
1095
+ StringSource,
1096
+ description="""Spark UI: Local directory where to cache application information for live UI. By default this is not set, meaning all application information will be kept in memory.""",
1097
+ is_required=False,
1098
+ ),
1099
+ }
1100
+ )
1101
+ ),
642
1102
  "killEnabled": Field(
643
1103
  StringSource,
644
1104
  description="""Spark UI: Allows jobs and stages to be killed from the web UI.""",
@@ -652,6 +1112,11 @@ def spark_config():
652
1112
  description="""Spark UI: How often to update live entities. -1 means "never update" when replaying applications, meaning only the last write will happen. For live applications, this avoids a few operations that we can live without when rapidly processing incoming task events.""",
653
1113
  is_required=False,
654
1114
  ),
1115
+ "minFlushPeriod": Field(
1116
+ StringSource,
1117
+ description="""Spark UI: Minimum time elapsed before stale UI data is flushed. This avoids UI staleness when incoming task events are not fired frequently.""",
1118
+ is_required=False,
1119
+ ),
655
1120
  }
656
1121
  )
657
1122
  ),
@@ -672,7 +1137,7 @@ def spark_config():
672
1137
  ),
673
1138
  "retainedTasks": Field(
674
1139
  StringSource,
675
- description="""Spark UI: How many tasks the Spark UI and status APIs remember before garbage collecting. This is a target maximum, and fewer elements may be retained in some circumstances.""",
1140
+ description="""Spark UI: How many tasks in one stage the Spark UI and status APIs remember before garbage collecting. This is a target maximum, and fewer elements may be retained in some circumstances.""",
676
1141
  is_required=False,
677
1142
  ),
678
1143
  "reverseProxy": Field(
@@ -682,14 +1147,42 @@ def spark_config():
682
1147
  ),
683
1148
  "reverseProxyUrl": Field(
684
1149
  StringSource,
685
- description="""Spark UI: This is the URL where your proxy is running. This URL is for proxy which is running in front of Spark Master. This is useful when running proxy for authentication e.g. OAuth proxy. Make sure this is a complete URL including scheme (http/https) and port to reach your proxy.""",
1150
+ description="""Spark UI: If the Spark UI should be served through another front-end reverse proxy, this is the URL for accessing the Spark master UI through that reverse proxy. This is useful when running proxy for authentication e.g. an OAuth proxy. The URL may contain a path prefix, like http://mydomain.com/path/to/spark/, allowing you to serve the UI for multiple Spark clusters and other web applications through the same virtual host and port. Normally, this should be an absolute URL including scheme (http/https), host and port. It is possible to specify a relative URL starting with "/" here. In this case, all URLs generated by the Spark UI and Spark REST APIs will be server-relative links -- this will still work, as the entire Spark UI is served through the same host and port. The setting affects link generation in the Spark UI, but the front-end reverse proxy is responsible for stripping a path prefix before forwarding the request, rewriting redirects which point directly to the Spark master, redirecting access from http://mydomain.com/path/to/spark to http://mydomain.com/path/to/spark/ (trailing slash after path prefix); otherwise relative links on the master page do not work correctly. This setting affects all the workers and application UIs running in the cluster and must be set identically on all the workers, drivers and masters. It is only effective when spark.ui.reverseProxy is turned on. This setting is not needed when the Spark master web UI is directly reachable. Note that the value of the setting can't contain the keyword `proxy` or `history` after split by "/". Spark UI relies on both keywords for getting REST API endpoints from URIs.""",
1151
+ is_required=False,
1152
+ ),
1153
+ "proxyRedirectUri": Field(
1154
+ StringSource,
1155
+ description="""Spark UI: Where to address redirects when Spark is running behind a proxy. This will make Spark modify redirect responses so they point to the proxy server, instead of the Spark UI's own address. This should be only the address of the server, without any prefix paths for the application; the prefix should be set either by the proxy server itself (by adding the X-Forwarded-Context request header), or by setting the proxy base in the Spark app's configuration.""",
686
1156
  is_required=False,
687
1157
  ),
688
1158
  "showConsoleProgress": Field(
689
1159
  StringSource,
690
- description="""Spark UI: Show the progress bar in the console. The progress bar shows the progress of stages that run for longer than 500ms. If multiple stages run at the same time, multiple progress bars will be displayed on the same line.""",
1160
+ description="""Spark UI: Show the progress bar in the console. The progress bar shows the progress of stages that run for longer than 500ms. If multiple stages run at the same time, multiple progress bars will be displayed on the same line. Note: In shell environment, the default value of spark.ui.showConsoleProgress is true.""",
691
1161
  is_required=False,
692
1162
  ),
1163
+ "custom": Field(
1164
+ Permissive(
1165
+ fields={
1166
+ "executor": Field(
1167
+ Permissive(
1168
+ fields={
1169
+ "log": Field(
1170
+ Permissive(
1171
+ fields={
1172
+ "url": Field(
1173
+ StringSource,
1174
+ description="""Spark UI: Specifies custom spark executor log URL for supporting external log service instead of using cluster managers' application log URLs in Spark UI. Spark will support some path variables via patterns which can vary on cluster manager. Please check the documentation for your cluster manager to see which patterns are supported, if any. Please note that this configuration also replaces original log urls in event log, which will be also effective when accessing the application on history server. The new log urls must be permanent, otherwise you might have dead link for executor log urls. For now, only YARN and K8s cluster manager supports this configuration""",
1175
+ is_required=False,
1176
+ ),
1177
+ }
1178
+ )
1179
+ ),
1180
+ }
1181
+ )
1182
+ ),
1183
+ }
1184
+ )
1185
+ ),
693
1186
  "retainedDeadExecutors": Field(
694
1187
  StringSource,
695
1188
  description="""Spark UI: How many dead executors the Spark UI and status APIs remember before garbage collecting.""",
@@ -700,6 +1193,66 @@ def spark_config():
700
1193
  description="""Spark UI: Comma separated list of filter class names to apply to the Spark Web UI. The filter should be a standard javax servlet Filter. Filter parameters can also be specified in the configuration, by setting config entries of the form spark.<class name of filter>.param.<param name>=<value> For example: spark.ui.filters=com.test.filter1 spark.com.test.filter1.param.name1=foo spark.com.test.filter1.param.name2=bar""",
701
1194
  is_required=False,
702
1195
  ),
1196
+ "requestHeaderSize": Field(
1197
+ StringSource,
1198
+ description="""Spark UI: The maximum allowed size for a HTTP request header, in bytes unless otherwise specified. This setting applies for the Spark History Server too.""",
1199
+ is_required=False,
1200
+ ),
1201
+ "timelineEnabled": Field(
1202
+ StringSource,
1203
+ description="""Spark UI: Whether to display event timeline data on UI pages.""",
1204
+ is_required=False,
1205
+ ),
1206
+ "timeline": Field(
1207
+ Permissive(
1208
+ fields={
1209
+ "executors": Field(
1210
+ Permissive(
1211
+ fields={
1212
+ "maximum": Field(
1213
+ StringSource,
1214
+ description="""Spark UI: The maximum number of executors shown in the event timeline.""",
1215
+ is_required=False,
1216
+ ),
1217
+ }
1218
+ )
1219
+ ),
1220
+ "jobs": Field(
1221
+ Permissive(
1222
+ fields={
1223
+ "maximum": Field(
1224
+ StringSource,
1225
+ description="""Spark UI: The maximum number of jobs shown in the event timeline.""",
1226
+ is_required=False,
1227
+ ),
1228
+ }
1229
+ )
1230
+ ),
1231
+ "stages": Field(
1232
+ Permissive(
1233
+ fields={
1234
+ "maximum": Field(
1235
+ StringSource,
1236
+ description="""Spark UI: The maximum number of stages shown in the event timeline.""",
1237
+ is_required=False,
1238
+ ),
1239
+ }
1240
+ )
1241
+ ),
1242
+ "tasks": Field(
1243
+ Permissive(
1244
+ fields={
1245
+ "maximum": Field(
1246
+ StringSource,
1247
+ description="""Spark UI: The maximum number of tasks shown in the event timeline.""",
1248
+ is_required=False,
1249
+ ),
1250
+ }
1251
+ )
1252
+ ),
1253
+ }
1254
+ )
1255
+ ),
703
1256
  }
704
1257
  )
705
1258
  ),
@@ -774,7 +1327,7 @@ def spark_config():
774
1327
  ),
775
1328
  "blockInterval": Field(
776
1329
  StringSource,
777
- description="""Spark Streaming: Interval at which data received by Spark Streaming receivers is chunked into blocks of data before storing them in Spark. Minimum recommended - 50 ms. See the performance tuning section in the Spark Streaming programing guide for more details.""",
1330
+ description="""Spark Streaming: Interval at which data received by Spark Streaming receivers is chunked into blocks of data before storing them in Spark. Minimum recommended - 50 ms. See the performance tuning section in the Spark Streaming programming guide for more details.""",
778
1331
  is_required=False,
779
1332
  ),
780
1333
  "receiver": Field(
@@ -782,7 +1335,7 @@ def spark_config():
782
1335
  fields={
783
1336
  "maxRate": Field(
784
1337
  StringSource,
785
- description="""Spark Streaming: Maximum rate (number of records per second) at which each receiver will receive data. Effectively, each stream will consume at most this number of records per second. Setting this configuration to 0 or a negative number will put no limit on the rate. See the deployment guide in the Spark Streaming programing guide for mode details.""",
1338
+ description="""Spark Streaming: Maximum rate (number of records per second) at which each receiver will receive data. Effectively, each stream will consume at most this number of records per second. Setting this configuration to 0 or a negative number will put no limit on the rate. See the deployment guide in the Spark Streaming programming guide for mode details.""",
786
1339
  is_required=False,
787
1340
  ),
788
1341
  "writeAheadLog": Field(
@@ -790,7 +1343,7 @@ def spark_config():
790
1343
  fields={
791
1344
  "enable": Field(
792
1345
  StringSource,
793
- description="""Spark Streaming: Enable write-ahead logs for receivers. All the input data received through receivers will be saved to write-ahead logs that will allow it to be recovered after driver failures. See the deployment guide in the Spark Streaming programing guide for more details.""",
1346
+ description="""Spark Streaming: Enable write-ahead logs for receivers. All the input data received through receivers will be saved to write-ahead logs that will allow it to be recovered after driver failures. See the deployment guide in the Spark Streaming programming guide for more details.""",
794
1347
  is_required=False,
795
1348
  ),
796
1349
  "closeFileAfterWrite": Field(
@@ -827,11 +1380,6 @@ def spark_config():
827
1380
  description="""Spark Streaming: Minimum rate (number of records per second) at which data will be read from each Kafka partition when using the new Kafka direct stream API.""",
828
1381
  is_required=False,
829
1382
  ),
830
- "maxRetries": Field(
831
- StringSource,
832
- description="""Spark Streaming: Maximum number of consecutive retries the driver will make in order to find the latest offsets on the leader of each partition (a default value of 1 means that the driver will make a maximum of 2 attempts). Only applies to the new Kafka direct stream API.""",
833
- is_required=False,
834
- ),
835
1383
  }
836
1384
  )
837
1385
  ),
@@ -855,6 +1403,17 @@ def spark_config():
855
1403
  }
856
1404
  )
857
1405
  ),
1406
+ "appStatusStore": Field(
1407
+ Permissive(
1408
+ fields={
1409
+ "diskStoreDir": Field(
1410
+ StringSource,
1411
+ description="""Spark UI: Local directory where to store diagnostic information of SQL executions. This configuration is only for live UI.""",
1412
+ is_required=False,
1413
+ ),
1414
+ }
1415
+ )
1416
+ ),
858
1417
  "broadcast": Field(
859
1418
  Permissive(
860
1419
  fields={
@@ -873,6 +1432,22 @@ def spark_config():
873
1432
  description="""Execution Behavior: Whether to enable checksum for broadcast. If enabled, broadcasts will include a checksum, which can help detect corrupted blocks, at the cost of computing and sending a little more data. It's possible to disable it if the network has other mechanisms to guarantee data won't be corrupted during broadcast.""",
874
1433
  is_required=False,
875
1434
  ),
1435
+ "UDFCompressionThreshold": Field(
1436
+ StringSource,
1437
+ description="""Execution Behavior: The threshold at which user-defined functions (UDFs) and Python RDD commands are compressed by broadcast in bytes unless otherwise specified.""",
1438
+ is_required=False,
1439
+ ),
1440
+ }
1441
+ )
1442
+ ),
1443
+ "checkpoint": Field(
1444
+ Permissive(
1445
+ fields={
1446
+ "compress": Field(
1447
+ StringSource,
1448
+ description="""Compression and Serialization: Whether to compress RDD checkpoints. Generally a good idea. Compression will use spark.io.compression.codec.""",
1449
+ is_required=False,
1450
+ ),
876
1451
  }
877
1452
  )
878
1453
  ),
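The new spark.broadcast.UDFCompressionThreshold and spark.checkpoint.compress fields are a byte-size knob and an on/off knob respectively; because every leaf in this schema is a StringSource, both are supplied as strings. A small illustrative snippet (values are arbitrary, and the nested-to-dotted mapping is the same assumption as in the earlier sketch):

# Illustrative values only; leaves are StringSource, so sizes and booleans are strings.
compression_conf = {
    "spark": {
        "broadcast": {"UDFCompressionThreshold": "1048576"},  # spark.broadcast.UDFCompressionThreshold, 1 MiB
        "checkpoint": {"compress": "true"},                   # spark.checkpoint.compress
    }
}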
@@ -892,7 +1467,7 @@ def spark_config():
892
1467
  fields={
893
1468
  "blockSize": Field(
894
1469
  StringSource,
895
- description="""Compression and Serialization: Block size in bytes used in LZ4 compression, in the case when LZ4 compression codec is used. Lowering this block size will also lower shuffle memory usage when LZ4 is used.""",
1470
+ description="""Compression and Serialization: Block size used in LZ4 compression, in the case when LZ4 compression codec is used. Lowering this block size will also lower shuffle memory usage when LZ4 is used. Default unit is bytes, unless otherwise specified. This configuration only applies to `spark.io.compression.codec`.""",
896
1471
  is_required=False,
897
1472
  ),
898
1473
  }
@@ -903,7 +1478,7 @@ def spark_config():
903
1478
  fields={
904
1479
  "blockSize": Field(
905
1480
  StringSource,
906
- description="""Compression and Serialization: Block size in bytes used in Snappy compression, in the case when Snappy compression codec is used. Lowering this block size will also lower shuffle memory usage when Snappy is used.""",
1481
+ description="""Compression and Serialization: Block size in Snappy compression, in the case when Snappy compression codec is used. Lowering this block size will also lower shuffle memory usage when Snappy is used. Default unit is bytes, unless otherwise specified. This configuration only applies to `spark.io.compression.codec`.""",
907
1482
  is_required=False,
908
1483
  ),
909
1484
  }
@@ -914,14 +1489,25 @@ def spark_config():
914
1489
  fields={
915
1490
  "level": Field(
916
1491
  StringSource,
917
- description="""Compression and Serialization: Compression level for Zstd compression codec. Increasing the compression level will result in better compression at the expense of more CPU and memory.""",
1492
+ description="""Compression and Serialization: Compression level for Zstd compression codec. Increasing the compression level will result in better compression at the expense of more CPU and memory. This configuration only applies to `spark.io.compression.codec`.""",
918
1493
  is_required=False,
919
1494
  ),
920
1495
  "bufferSize": Field(
921
1496
  StringSource,
922
- description="""Compression and Serialization: Buffer size in bytes used in Zstd compression, in the case when Zstd compression codec is used. Lowering this size will lower the shuffle memory usage when Zstd is used, but it might increase the compression cost because of excessive JNI call overhead.""",
1497
+ description="""Compression and Serialization: Buffer size in bytes used in Zstd compression, in the case when Zstd compression codec is used. Lowering this size will lower the shuffle memory usage when Zstd is used, but it might increase the compression cost because of excessive JNI call overhead. This configuration only applies to `spark.io.compression.codec`.""",
923
1498
  is_required=False,
924
1499
  ),
1500
+ "bufferPool": Field(
1501
+ Permissive(
1502
+ fields={
1503
+ "enabled": Field(
1504
+ StringSource,
1505
+ description="""Compression and Serialization: If true, enable buffer pool of ZSTD JNI library.""",
1506
+ is_required=False,
1507
+ ),
1508
+ }
1509
+ )
1510
+ ),
925
1511
  }
926
1512
  )
927
1513
  ),
@@ -1034,31 +1620,21 @@ def spark_config():
1034
1620
  ),
1035
1621
  "size": Field(
1036
1622
  IntSource,
1037
- description="""Memory Management: The absolute amount of memory in bytes which can be used for off-heap allocation. This setting has no impact on heap memory usage, so if your executors' total memory consumption must fit within some hard limit then be sure to shrink your JVM heap size accordingly. This must be set to a positive value when spark.memory.offHeap.enabled=true.""",
1623
+ description="""Memory Management: The absolute amount of memory which can be used for off-heap allocation, in bytes unless otherwise specified. This setting has no impact on heap memory usage, so if your executors' total memory consumption must fit within some hard limit then be sure to shrink your JVM heap size accordingly. This must be set to a positive value when spark.memory.offHeap.enabled=true.""",
1038
1624
  is_required=False,
1039
1625
  ),
1040
1626
  }
1041
1627
  )
1042
1628
  ),
1043
- "useLegacyMode": Field(
1044
- Bool,
1045
- description="""Memory Management: Whether to enable the legacy memory management mode used in Spark 1.5 and before. The legacy mode rigidly partitions the heap space into fixed-size regions, potentially leading to excessive spilling if the application was not tuned. The following deprecated memory fraction configurations are not read unless this is enabled: spark.shuffle.memoryFraction spark.storage.memoryFraction spark.storage.unrollFraction""",
1046
- is_required=False,
1047
- ),
1048
1629
  }
1049
1630
  )
1050
1631
  ),
1051
1632
  "storage": Field(
1052
1633
  Permissive(
1053
1634
  fields={
1054
- "memoryFraction": Field(
1055
- Float,
1056
- description="""Memory Management: (deprecated) This is read only if spark.memory.useLegacyMode is enabled. Fraction of Java heap to use for Spark's memory cache. This should not be larger than the "old" generation of objects in the JVM, which by default is given 0.6 of the heap, but you can increase it if you configure your own old generation size.""",
1057
- is_required=False,
1058
- ),
1059
- "unrollFraction": Field(
1060
- Float,
1061
- description="""Memory Management: (deprecated) This is read only if spark.memory.useLegacyMode is enabled. Fraction of spark.storage.memoryFraction to use for unrolling blocks in memory. This is dynamically allocated by dropping existing blocks when there is not enough free storage space to unroll the new block in its entirety.""",
1635
+ "unrollMemoryThreshold": Field(
1636
+ StringSource,
1637
+ description="""Memory Management: Initial memory to request before unrolling any block.""",
1062
1638
  is_required=False,
1063
1639
  ),
1064
1640
  "replication": Field(
@@ -1072,11 +1648,81 @@ def spark_config():
1072
1648
  }
1073
1649
  )
1074
1650
  ),
1651
+ "localDiskByExecutors": Field(
1652
+ Permissive(
1653
+ fields={
1654
+ "cacheSize": Field(
1655
+ StringSource,
1656
+ description="""Memory Management: The max number of executors for which the local dirs are stored. This size is both applied for the driver and both for the executors side to avoid having an unbounded store. This cache will be used to avoid the network in case of fetching disk persisted RDD blocks or shuffle blocks (when spark.shuffle.readHostLocalDisk is set) from the same host.""",
1657
+ is_required=False,
1658
+ ),
1659
+ }
1660
+ )
1661
+ ),
1075
1662
  "memoryMapThreshold": Field(
1076
1663
  StringSource,
1077
- description="""Execution Behavior: Size in bytes of a block above which Spark memory maps when reading a block from disk. This prevents Spark from memory mapping very small blocks. In general, memory mapping has high overhead for blocks close to or below the page size of the operating system.""",
1664
+ description="""Execution Behavior: Size of a block above which Spark memory maps when reading a block from disk. Default unit is bytes, unless specified otherwise. This prevents Spark from memory mapping very small blocks. In general, memory mapping has high overhead for blocks close to or below the page size of the operating system.""",
1078
1665
  is_required=False,
1079
1666
  ),
1667
+ "decommission": Field(
1668
+ Permissive(
1669
+ fields={
1670
+ "enabled": Field(
1671
+ StringSource,
1672
+ description="""Execution Behavior: Whether to decommission the block manager when decommissioning executor.""",
1673
+ is_required=False,
1674
+ ),
1675
+ "shuffleBlocks": Field(
1676
+ Permissive(
1677
+ fields={
1678
+ "enabled": Field(
1679
+ StringSource,
1680
+ description="""Execution Behavior: Whether to transfer shuffle blocks during block manager decommissioning. Requires a migratable shuffle resolver (like sort based shuffle).""",
1681
+ is_required=False,
1682
+ ),
1683
+ "maxThreads": Field(
1684
+ StringSource,
1685
+ description="""Execution Behavior: Maximum number of threads to use in migrating shuffle files.""",
1686
+ is_required=False,
1687
+ ),
1688
+ "maxDiskSize": Field(
1689
+ StringSource,
1690
+ description="""Execution Behavior: Maximum disk space to use to store shuffle blocks before rejecting remote shuffle blocks. Rejecting remote shuffle blocks means that an executor will not receive any shuffle migrations, and if there are no other executors available for migration then shuffle blocks will be lost unless spark.storage.decommission.fallbackStorage.path is configured.""",
1691
+ is_required=False,
1692
+ ),
1693
+ }
1694
+ )
1695
+ ),
1696
+ "rddBlocks": Field(
1697
+ Permissive(
1698
+ fields={
1699
+ "enabled": Field(
1700
+ StringSource,
1701
+ description="""Execution Behavior: Whether to transfer RDD blocks during block manager decommissioning.""",
1702
+ is_required=False,
1703
+ ),
1704
+ }
1705
+ )
1706
+ ),
1707
+ "fallbackStorage": Field(
1708
+ Permissive(
1709
+ fields={
1710
+ "path": Field(
1711
+ StringSource,
1712
+ description="""Execution Behavior: The location for fallback storage during block manager decommissioning. For example, s3a://spark-storage/. In case of empty, fallback storage is disabled. The storage should be managed by TTL because Spark will not clean it up.""",
1713
+ is_required=False,
1714
+ ),
1715
+ "cleanUp": Field(
1716
+ StringSource,
1717
+ description="""Execution Behavior: If true, Spark cleans up its fallback storage data during shutting down.""",
1718
+ is_required=False,
1719
+ ),
1720
+ }
1721
+ )
1722
+ ),
1723
+ }
1724
+ )
1725
+ ),
1080
1726
  }
1081
1727
  )
1082
1728
  ),
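The spark.storage.decommission.* block added above migrates cached and shuffle data off executors that are being decommissioned. A hedged sketch of one way those fields could be combined (the fallback path is the example from the description; everything else is a placeholder):

# Illustrative: migrate shuffle and RDD blocks away from decommissioning executors,
# with an object-store fallback when no peer executor can take them.
decommission_conf = {
    "spark": {
        "storage": {
            "decommission": {
                "enabled": "true",
                "shuffleBlocks": {"enabled": "true", "maxThreads": "8"},
                "rddBlocks": {"enabled": "true"},
                "fallbackStorage": {"path": "s3a://spark-storage/"},
            }
        }
    }
}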
@@ -1164,7 +1810,7 @@ def spark_config():
1164
1810
  fields={
1165
1811
  "version": Field(
1166
1812
  IntSource,
1167
- description="""Execution Behavior: The file output committer algorithm version, valid algorithm version number: 1 or 2. Version 2 may have better performance, but version 1 may handle failures better in certain situations, as per MAPREDUCE-4815.""",
1813
+ description="""Execution Behavior: The file output committer algorithm version, valid algorithm version number: 1 or 2. Note that 2 may cause a correctness issue like MAPREDUCE-7282.""",
1168
1814
  is_required=False,
1169
1815
  ),
1170
1816
  }
@@ -1187,23 +1833,28 @@ def spark_config():
1187
1833
  fields={
1188
1834
  "maxSize": Field(
1189
1835
  StringSource,
1190
- description="""Networking: Maximum message size (in MB) to allow in "control plane" communication; generally only applies to map output size information sent between executors and the driver. Increase this if you are running jobs with many thousands of map and reduce tasks and see messages about the RPC message size.""",
1836
+ description="""Networking: Maximum message size (in MiB) to allow in "control plane" communication; generally only applies to map output size information sent between executors and the driver. Increase this if you are running jobs with many thousands of map and reduce tasks and see messages about the RPC message size.""",
1191
1837
  is_required=False,
1192
1838
  ),
1193
1839
  }
1194
1840
  )
1195
1841
  ),
1196
- "numRetries": Field(
1197
- StringSource,
1198
- description="""Networking: Number of times to retry before an RPC task gives up. An RPC task will run at most times of this number.""",
1199
- is_required=False,
1200
- ),
1201
- "retry": Field(
1842
+ "io": Field(
1202
1843
  Permissive(
1203
1844
  fields={
1204
- "wait": Field(
1845
+ "backLog": Field(
1846
+ StringSource,
1847
+ description="""Networking: Length of the accept queue for the RPC server. For large applications, this value may need to be increased, so that incoming connections are not dropped when a large number of connections arrives in a short period of time.""",
1848
+ is_required=False,
1849
+ ),
1850
+ "connectionTimeout": Field(
1851
+ StringSource,
1852
+ description="""Networking: Timeout for the established connections between RPC peers to be marked as idled and closed if there are outstanding RPC requests but no traffic on the channel for at least `connectionTimeout`.""",
1853
+ is_required=False,
1854
+ ),
1855
+ "connectionCreationTimeout": Field(
1205
1856
  StringSource,
1206
- description="""Networking: Duration for an RPC ask operation to wait before retrying.""",
1857
+ description="""Networking: Timeout for establishing a connection between RPC peers.""",
1207
1858
  is_required=False,
1208
1859
  ),
1209
1860
  }
@@ -1238,7 +1889,28 @@ def spark_config():
1238
1889
  fields={
1239
1890
  "timeout": Field(
1240
1891
  StringSource,
1241
- description="""Networking: Default timeout for all network interactions. This config will be used in place of spark.core.connection.ack.wait.timeout, spark.storage.blockManagerSlaveTimeoutMs, spark.shuffle.io.connectionTimeout, spark.rpc.askTimeout or spark.rpc.lookupTimeout if they are not configured.""",
1892
+ description="""Networking: Default timeout for all network interactions. This config will be used in place of spark.storage.blockManagerHeartbeatTimeoutMs, spark.shuffle.io.connectionTimeout, spark.rpc.askTimeout or spark.rpc.lookupTimeout if they are not configured.""",
1893
+ is_required=False,
1894
+ ),
1895
+ "timeoutInterval": Field(
1896
+ StringSource,
1897
+ description="""Networking: Interval for the driver to check and expire dead executors.""",
1898
+ is_required=False,
1899
+ ),
1900
+ "io": Field(
1901
+ Permissive(
1902
+ fields={
1903
+ "preferDirectBufs": Field(
1904
+ StringSource,
1905
+ description="""Networking: If enabled then off-heap buffer allocations are preferred by the shared allocators. Off-heap buffers are used to reduce garbage collection during shuffle and cache block transfer. For environments where off-heap memory is tightly limited, users may wish to turn this off to force all allocations to be on-heap.""",
1906
+ is_required=False,
1907
+ ),
1908
+ }
1909
+ )
1910
+ ),
1911
+ "maxRemoteBlockSizeFetchToMem": Field(
1912
+ StringSource,
1913
+ description="""Networking: Remote block will be fetched to disk when size of the block is above this threshold in bytes. This is to avoid a giant request takes too much memory. Note this configuration will affect both shuffle fetch and block manager remote block fetch. For users who enabled external shuffle service, this feature can only work when external shuffle service is at least 2.3.0.""",
1242
1914
  is_required=False,
1243
1915
  ),
1244
1916
  }
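The networking hunks above replace the old rpc retry knobs with spark.rpc.io.* settings and extend spark.network.* with a heartbeat check interval, a direct-buffer preference, and a fetch-to-disk threshold. A sketch of how the spark.network.* side could be filled in (values are arbitrary placeholders):

# Illustrative placeholder values for the spark.network.* fields added above.
network_conf = {
    "spark": {
        "network": {
            "timeout": "120s",                       # spark.network.timeout
            "timeoutInterval": "60s",                # spark.network.timeoutInterval
            "io": {"preferDirectBufs": "false"},     # spark.network.io.preferDirectBufs
            "maxRemoteBlockSizeFetchToMem": "200m",  # spark.network.maxRemoteBlockSizeFetchToMem
        }
    }
}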
@@ -1255,35 +1927,6 @@ def spark_config():
1255
1927
  }
1256
1928
  )
1257
1929
  ),
1258
- "core": Field(
1259
- Permissive(
1260
- fields={
1261
- "connection": Field(
1262
- Permissive(
1263
- fields={
1264
- "ack": Field(
1265
- Permissive(
1266
- fields={
1267
- "wait": Field(
1268
- Permissive(
1269
- fields={
1270
- "timeout": Field(
1271
- StringSource,
1272
- description="""Networking: How long for the connection to wait for ack to occur before timing out and giving up. To avoid unwilling timeout caused by long pause like GC, you can set larger value.""",
1273
- is_required=False,
1274
- ),
1275
- }
1276
- )
1277
- ),
1278
- }
1279
- )
1280
- ),
1281
- }
1282
- )
1283
- ),
1284
- }
1285
- )
1286
- ),
1287
1930
  "cores": Field(
1288
1931
  Permissive(
1289
1932
  fields={
@@ -1364,7 +2007,106 @@ def spark_config():
1364
2007
  fields={
1365
2008
  "capacity": Field(
1366
2009
  StringSource,
1367
- description="""Scheduling: Capacity for event queue in Spark listener bus, must be greater than 0. Consider increasing value (e.g. 20000) if listener events are dropped. Increasing this value may result in the driver using more memory.""",
2010
+ description="""Scheduling: The default capacity for event queues. Spark will try to initialize an event queue using capacity specified by `spark.scheduler.listenerbus.eventqueue.queueName.capacity` first. If it's not configured, Spark will use the default capacity specified by this config. Note that capacity must be greater than 0. Consider increasing value (e.g. 20000) if listener events are dropped. Increasing this value may result in the driver using more memory.""",
2011
+ is_required=False,
2012
+ ),
2013
+ "shared": Field(
2014
+ Permissive(
2015
+ fields={
2016
+ "capacity": Field(
2017
+ StringSource,
2018
+ description="""Scheduling: Capacity for shared event queue in Spark listener bus, which hold events for external listener(s) that register to the listener bus. Consider increasing value, if the listener events corresponding to shared queue are dropped. Increasing this value may result in the driver using more memory.""",
2019
+ is_required=False,
2020
+ ),
2021
+ }
2022
+ )
2023
+ ),
2024
+ "appStatus": Field(
2025
+ Permissive(
2026
+ fields={
2027
+ "capacity": Field(
2028
+ StringSource,
2029
+ description="""Scheduling: Capacity for appStatus event queue, which hold events for internal application status listeners. Consider increasing value, if the listener events corresponding to appStatus queue are dropped. Increasing this value may result in the driver using more memory.""",
2030
+ is_required=False,
2031
+ ),
2032
+ }
2033
+ )
2034
+ ),
2035
+ "executorManagement": Field(
2036
+ Permissive(
2037
+ fields={
2038
+ "capacity": Field(
2039
+ StringSource,
2040
+ description="""Scheduling: Capacity for executorManagement event queue in Spark listener bus, which hold events for internal executor management listeners. Consider increasing value if the listener events corresponding to executorManagement queue are dropped. Increasing this value may result in the driver using more memory.""",
2041
+ is_required=False,
2042
+ ),
2043
+ }
2044
+ )
2045
+ ),
2046
+ "eventLog": Field(
2047
+ Permissive(
2048
+ fields={
2049
+ "capacity": Field(
2050
+ StringSource,
2051
+ description="""Scheduling: Capacity for eventLog queue in Spark listener bus, which hold events for Event logging listeners that write events to eventLogs. Consider increasing value if the listener events corresponding to eventLog queue are dropped. Increasing this value may result in the driver using more memory.""",
2052
+ is_required=False,
2053
+ ),
2054
+ }
2055
+ )
2056
+ ),
2057
+ "streams": Field(
2058
+ Permissive(
2059
+ fields={
2060
+ "capacity": Field(
2061
+ StringSource,
2062
+ description="""Scheduling: Capacity for streams queue in Spark listener bus, which hold events for internal streaming listener. Consider increasing value if the listener events corresponding to streams queue are dropped. Increasing this value may result in the driver using more memory.""",
2063
+ is_required=False,
2064
+ ),
2065
+ }
2066
+ )
2067
+ ),
2068
+ }
2069
+ )
2070
+ ),
2071
+ }
2072
+ )
2073
+ ),
2074
+ "resource": Field(
2075
+ Permissive(
2076
+ fields={
2077
+ "profileMergeConflicts": Field(
2078
+ StringSource,
2079
+ description="""Scheduling: If set to "true", Spark will merge ResourceProfiles when different profiles are specified in RDDs that get combined into a single stage. When they are merged, Spark chooses the maximum of each resource and creates a new ResourceProfile. The default of false results in Spark throwing an exception if multiple different ResourceProfiles are found in RDDs going into the same stage.""",
2080
+ is_required=False,
2081
+ ),
2082
+ }
2083
+ )
2084
+ ),
2085
+ "excludeOnFailure": Field(
2086
+ Permissive(
2087
+ fields={
2088
+ "unschedulableTaskSetTimeout": Field(
2089
+ StringSource,
2090
+ description="""Scheduling: The timeout in seconds to wait to acquire a new executor and schedule a task before aborting a TaskSet which is unschedulable because all executors are excluded due to task failures.""",
2091
+ is_required=False,
2092
+ ),
2093
+ }
2094
+ )
2095
+ ),
2096
+ "barrier": Field(
2097
+ Permissive(
2098
+ fields={
2099
+ "maxConcurrentTasksCheck": Field(
2100
+ Permissive(
2101
+ fields={
2102
+ "interval": Field(
2103
+ StringSource,
2104
+ description="""Barrier Execution Mode: Time in seconds to wait between a max concurrent tasks check failure and the next check. A max concurrent tasks check ensures the cluster can launch more concurrent tasks than required by a barrier stage on job submitted. The check can fail in case a cluster has just started and not enough executors have registered, so we wait for a little while and try to perform the check again. If the check fails more than a configured max failure times for a job then fail current job submission. Note this config only applies to jobs that contain one or more barrier stages, we won't perform the check on non-barrier jobs.""",
2105
+ is_required=False,
2106
+ ),
2107
+ "maxFailures": Field(
2108
+ StringSource,
2109
+ description="""Barrier Execution Mode: Number of max concurrent tasks check failures allowed before fail a job submission. A max concurrent tasks check ensures the cluster can launch more concurrent tasks than required by a barrier stage on job submitted. The check can fail in case a cluster has just started and not enough executors have registered, so we wait for a little while and try to perform the check again. If the check fails more than a configured max failure times for a job then fail current job submission. Note this config only applies to jobs that contain one or more barrier stages, we won't perform the check on non-barrier jobs.""",
1368
2110
  is_required=False,
1369
2111
  ),
1370
2112
  }
@@ -1376,17 +2118,34 @@ def spark_config():
1376
2118
  }
1377
2119
  )
1378
2120
  ),
1379
- "blacklist": Field(
2121
+ "standalone": Field(
2122
+ Permissive(
2123
+ fields={
2124
+ "submit": Field(
2125
+ Permissive(
2126
+ fields={
2127
+ "waitAppCompletion": Field(
2128
+ StringSource,
2129
+ description="""Scheduling: If set to true, Spark will merge ResourceProfiles when different profiles are specified in RDDs that get combined into a single stage. When they are merged, Spark chooses the maximum of each resource and creates a new ResourceProfile. The default of false results in Spark throwing an exception if multiple different ResourceProfiles are found in RDDs going into the same stage.""",
2130
+ is_required=False,
2131
+ ),
2132
+ }
2133
+ )
2134
+ ),
2135
+ }
2136
+ )
2137
+ ),
2138
+ "excludeOnFailure": Field(
1380
2139
  Permissive(
1381
2140
  fields={
1382
2141
  "enabled": Field(
1383
2142
  StringSource,
1384
- description="""Scheduling: If set to "true", prevent Spark from scheduling tasks on executors that have been blacklisted due to too many task failures. The blacklisting algorithm can be further controlled by the other "spark.blacklist" configuration options.""",
2143
+ description="""Scheduling: If set to "true", prevent Spark from scheduling tasks on executors that have been excluded due to too many task failures. The algorithm used to exclude executors and nodes can be further controlled by the other "spark.excludeOnFailure" configuration options.""",
1385
2144
  is_required=False,
1386
2145
  ),
1387
2146
  "timeout": Field(
1388
2147
  StringSource,
1389
- description="""Scheduling: (Experimental) How long a node or executor is blacklisted for the entire application, before it is unconditionally removed from the blacklist to attempt running new tasks.""",
2148
+ description="""Scheduling: (Experimental) How long a node or executor is excluded for the entire application, before it is unconditionally removed from the excludelist to attempt running new tasks.""",
1390
2149
  is_required=False,
1391
2150
  ),
1392
2151
  "task": Field(
@@ -1394,12 +2153,12 @@ def spark_config():
1394
2153
  fields={
1395
2154
  "maxTaskAttemptsPerExecutor": Field(
1396
2155
  StringSource,
1397
- description="""Scheduling: (Experimental) For a given task, how many times it can be retried on one executor before the executor is blacklisted for that task.""",
2156
+ description="""Scheduling: (Experimental) For a given task, how many times it can be retried on one executor before the executor is excluded for that task.""",
1398
2157
  is_required=False,
1399
2158
  ),
1400
2159
  "maxTaskAttemptsPerNode": Field(
1401
2160
  StringSource,
1402
- description="""Scheduling: (Experimental) For a given task, how many times it can be retried on one node, before the entire node is blacklisted for that task.""",
2161
+ description="""Scheduling: (Experimental) For a given task, how many times it can be retried on one node, before the entire node is excluded for that task.""",
1403
2162
  is_required=False,
1404
2163
  ),
1405
2164
  }
@@ -1410,12 +2169,12 @@ def spark_config():
1410
2169
  fields={
1411
2170
  "maxFailedTasksPerExecutor": Field(
1412
2171
  StringSource,
1413
- description="""Scheduling: (Experimental) How many different tasks must fail on one executor, within one stage, before the executor is blacklisted for that stage.""",
2172
+ description="""Scheduling: (Experimental) How many different tasks must fail on one executor, within one stage, before the executor is excluded for that stage.""",
1414
2173
  is_required=False,
1415
2174
  ),
1416
2175
  "maxFailedExecutorsPerNode": Field(
1417
2176
  StringSource,
1418
- description="""Scheduling: (Experimental) How many different executors are marked as blacklisted for a given stage, before the entire node is marked as failed for the stage.""",
2177
+ description="""Scheduling: (Experimental) How many different executors are marked as excluded for a given stage, before the entire node is marked as failed for the stage.""",
1419
2178
  is_required=False,
1420
2179
  ),
1421
2180
  }
@@ -1426,12 +2185,12 @@ def spark_config():
1426
2185
  fields={
1427
2186
  "maxFailedTasksPerExecutor": Field(
1428
2187
  StringSource,
1429
- description="""Scheduling: (Experimental) How many different tasks must fail on one executor, in successful task sets, before the executor is blacklisted for the entire application. Blacklisted executors will be automatically added back to the pool of available resources after the timeout specified by spark.blacklist.timeout. Note that with dynamic allocation, though, the executors may get marked as idle and be reclaimed by the cluster manager.""",
2188
+ description="""Scheduling: (Experimental) How many different tasks must fail on one executor, in successful task sets, before the executor is excluded for the entire application. Excluded executors will be automatically added back to the pool of available resources after the timeout specified by spark.excludeOnFailure.timeout. Note that with dynamic allocation, though, the executors may get marked as idle and be reclaimed by the cluster manager.""",
1430
2189
  is_required=False,
1431
2190
  ),
1432
2191
  "maxFailedExecutorsPerNode": Field(
1433
2192
  StringSource,
1434
- description="""Scheduling: (Experimental) How many different executors must be blacklisted for the entire application, before the node is blacklisted for the entire application. Blacklisted nodes will be automatically added back to the pool of available resources after the timeout specified by spark.blacklist.timeout. Note that with dynamic allocation, though, the executors on the node may get marked as idle and be reclaimed by the cluster manager.""",
2193
+ description="""Scheduling: (Experimental) How many different executors must be excluded for the entire application, before the node is excluded for the entire application. Excluded nodes will be automatically added back to the pool of available resources after the timeout specified by spark.excludeOnFailure.timeout. Note that with dynamic allocation, though, the executors on the node may get marked as idle and be reclaimed by the cluster manager.""",
1435
2194
  is_required=False,
1436
2195
  ),
1437
2196
  "fetchFailure": Field(
@@ -1439,7 +2198,7 @@ def spark_config():
1439
2198
  fields={
1440
2199
  "enabled": Field(
1441
2200
  StringSource,
1442
- description="""Scheduling: (Experimental) If set to "true", Spark will blacklist the executor immediately when a fetch failure happens. If external shuffle service is enabled, then the whole node will be blacklisted.""",
2201
+ description="""Scheduling: (Experimental) If set to "true", Spark will exclude the executor immediately when a fetch failure happens. If external shuffle service is enabled, then the whole node will be excluded.""",
1443
2202
  is_required=False,
1444
2203
  ),
1445
2204
  }
@@ -1448,9 +2207,9 @@ def spark_config():
1448
2207
  }
1449
2208
  )
1450
2209
  ),
1451
- "killBlacklistedExecutors": Field(
2210
+ "killExcludedExecutors": Field(
1452
2211
  StringSource,
1453
- description="""Scheduling: (Experimental) If set to "true", allow Spark to automatically kill the executors when they are blacklisted on fetch failure or blacklisted for the entire application, as controlled by spark.blacklist.application.*. Note that, when an entire node is added to the blacklist, all of the executors on that node will be killed.""",
2212
+ description="""Scheduling: (Experimental) If set to "true", allow Spark to automatically kill the executors when they are excluded on fetch failure or excluded for the entire application, as controlled by spark.killExcludedExecutors.application.*. Note that, when an entire node is added excluded, all of the executors on that node will be killed.""",
1454
2213
  is_required=False,
1455
2214
  ),
1456
2215
  }
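This group of hunks renames the spark.blacklist.* schema to spark.excludeOnFailure.*, keeping the same shape. A sketch of the renamed keys in use (values are placeholders):

# Illustrative: the renamed exclude-on-failure settings, same structure as the old blacklist block.
exclude_conf = {
    "spark": {
        "excludeOnFailure": {
            "enabled": "true",                 # spark.excludeOnFailure.enabled
            "timeout": "1h",                   # spark.excludeOnFailure.timeout
            "task": {
                "maxTaskAttemptsPerExecutor": "1",
                "maxTaskAttemptsPerNode": "2",
            },
            "killExcludedExecutors": "true",   # spark.excludeOnFailure.killExcludedExecutors
        }
    }
}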
@@ -1479,6 +2238,49 @@ def spark_config():
1479
2238
  description="""Scheduling: Fraction of tasks which must be complete before speculation is enabled for a particular stage.""",
1480
2239
  is_required=False,
1481
2240
  ),
2241
+ "minTaskRuntime": Field(
2242
+ StringSource,
2243
+ description="""Scheduling: Minimum amount of time a task runs before being considered for speculation. This can be used to avoid launching speculative copies of tasks that are very short.""",
2244
+ is_required=False,
2245
+ ),
2246
+ "task": Field(
2247
+ Permissive(
2248
+ fields={
2249
+ "duration": Field(
2250
+ Permissive(
2251
+ fields={
2252
+ "threshold": Field(
2253
+ StringSource,
2254
+ description="""Scheduling: Task duration after which scheduler would try to speculative run the task. If provided, tasks would be speculatively run if current stage contains less tasks than or equal to the number of slots on a single executor and the task is taking longer time than the threshold. This config helps speculate stage with very few tasks. Regular speculation configs may also apply if the executor slots are large enough. E.g. tasks might be re-launched if there are enough successful runs even though the threshold hasn't been reached. The number of slots is computed based on the conf values of spark.executor.cores and spark.task.cpus minimum 1. Default unit is bytes, unless otherwise specified.""",
2255
+ is_required=False,
2256
+ ),
2257
+ }
2258
+ )
2259
+ ),
2260
+ }
2261
+ )
2262
+ ),
2263
+ "efficiency": Field(
2264
+ Permissive(
2265
+ fields={
2266
+ "processRateMultiplier": Field(
2267
+ StringSource,
2268
+ description="""Scheduling: A multiplier that used when evaluating inefficient tasks. The higher the multiplier is, the more tasks will be possibly considered as inefficient.""",
2269
+ is_required=False,
2270
+ ),
2271
+ "longRunTaskFactor": Field(
2272
+ StringSource,
2273
+ description="""Scheduling: A task will be speculated anyway as long as its duration has exceeded the value of multiplying the factor and the time threshold (either be spark.speculation.multiplier * successfulTaskDurations.median or spark.speculation.minTaskRuntime) regardless of it's data process rate is good or not. This avoids missing the inefficient tasks when task slow isn't related to data process rate.""",
2274
+ is_required=False,
2275
+ ),
2276
+ "enabled": Field(
2277
+ StringSource,
2278
+ description="""Scheduling: When set to true, spark will evaluate the efficiency of task processing through the stage task metrics or its duration, and only need to speculate the inefficient tasks. A task is inefficient when 1)its data process rate is less than the average data process rate of all successful tasks in the stage multiplied by a multiplier or 2)its duration has exceeded the value of multiplying spark.speculation.efficiency.longRunTaskFactor and the time threshold (either be spark.speculation.multiplier * successfulTaskDurations.median or spark.speculation.minTaskRuntime).""",
2279
+ is_required=False,
2280
+ ),
2281
+ }
2282
+ )
2283
+ ),
1482
2284
  }
1483
2285
  )
1484
2286
  ),
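The spark.speculation.efficiency.* descriptions above define an inefficient task by two conditions: a below-average data process rate, or a duration beyond longRunTaskFactor times the time threshold. A small worked example of that decision, paraphrasing the descriptions rather than Spark's actual implementation (all numbers invented):

# Worked example of the two "inefficient task" conditions described above.
avg_process_rate = 10_000     # records/s, average over successful tasks in the stage
multiplier = 0.75             # spark.speculation.efficiency.processRateMultiplier
long_run_factor = 2.0         # spark.speculation.efficiency.longRunTaskFactor
time_threshold_s = 30.0       # spark.speculation.multiplier * median duration, or minTaskRuntime

task_rate = 6_000             # records/s for the task under evaluation
task_duration_s = 70.0

slow_by_rate = task_rate < avg_process_rate * multiplier                  # 6000 < 7500 -> True
slow_by_duration = task_duration_s > long_run_factor * time_threshold_s   # 70 > 60 -> True
print(slow_by_rate or slow_by_duration)  # True: the task is a candidate for speculation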
@@ -1490,9 +2292,26 @@ def spark_config():
1490
2292
  description="""Scheduling: Number of cores to allocate for each task.""",
1491
2293
  is_required=False,
1492
2294
  ),
2295
+ "resource": Field(
2296
+ Permissive(
2297
+ fields={
2298
+ "{resourceName}": Field(
2299
+ Permissive(
2300
+ fields={
2301
+ "amount": Field(
2302
+ StringSource,
2303
+ description="""Scheduling: Amount of a particular resource type to allocate for each task, note that this can be a double. If this is specified you must also provide the executor config spark.executor.resource.{resourceName}.amount and any corresponding discovery configs so that your executors are created with that resource type. In addition to whole amounts, a fractional amount (for example, 0.25, which means 1/4th of a resource) may be specified. Fractional amounts must be less than or equal to 0.5, or in other words, the minimum amount of resource sharing is 2 tasks per resource. Additionally, fractional amounts are floored in order to assign resource slots (e.g. a 0.2222 configuration, or 1/0.2222 slots will become 4 tasks/resource, not 5).""",
2304
+ is_required=False,
2305
+ ),
2306
+ }
2307
+ )
2308
+ ),
2309
+ }
2310
+ )
2311
+ ),
1493
2312
  "maxFailures": Field(
1494
2313
  StringSource,
1495
- description="""Scheduling: Number of failures of any particular task before giving up on the job. The total number of failures spread across different tasks will not cause the job to fail; a particular task has to fail this number of attempts. Should be greater than or equal to 1. Number of allowed retries = this value - 1.""",
2314
+ description="""Scheduling: Number of continuous failures of any particular task before giving up on the job. The total number of failures spread across different tasks will not cause the job to fail; a particular task has to fail this number of attempts continuously. If any attempt succeeds, the failure count for the task will be reset. Should be greater than or equal to 1. Number of allowed retries = this value - 1.""",
1496
2315
  is_required=False,
1497
2316
  ),
1498
2317
  "reaper": Field(
@@ -1532,6 +2351,28 @@ def spark_config():
1532
2351
  description="""Scheduling: Number of consecutive stage attempts allowed before a stage is aborted.""",
1533
2352
  is_required=False,
1534
2353
  ),
2354
+ "ignoreDecommissionFetchFailure": Field(
2355
+ StringSource,
2356
+ description="""Scheduling: Whether ignore stage fetch failure caused by executor decommission when count spark.stage.maxConsecutiveAttempts""",
2357
+ is_required=False,
2358
+ ),
2359
+ }
2360
+ )
2361
+ ),
2362
+ "barrier": Field(
2363
+ Permissive(
2364
+ fields={
2365
+ "sync": Field(
2366
+ Permissive(
2367
+ fields={
2368
+ "timeout": Field(
2369
+ StringSource,
2370
+ description="""Barrier Execution Mode: The timeout in seconds for each barrier() call from a barrier task. If the coordinator didn't receive all the sync messages from barrier tasks within the configured time, throw a SparkException to fail all the tasks. The default value is set to 31536000(3600 * 24 * 365) so the barrier() call shall wait for one year.""",
2371
+ is_required=False,
2372
+ ),
2373
+ }
2374
+ )
2375
+ ),
1535
2376
  }
1536
2377
  )
1537
2378
  ),
@@ -1540,7 +2381,7 @@ def spark_config():
1540
2381
  fields={
1541
2382
  "enabled": Field(
1542
2383
  StringSource,
1543
- description="""Dynamic Allocation: Whether to use dynamic resource allocation, which scales the number of executors registered with this application up and down based on the workload. For more detail, see the description here. This requires spark.shuffle.service.enabled to be set. The following configurations are also relevant: spark.dynamicAllocation.minExecutors, spark.dynamicAllocation.maxExecutors, and spark.dynamicAllocation.initialExecutors spark.dynamicAllocation.executorAllocationRatio""",
2384
+ description="""Dynamic Allocation: Whether to use dynamic resource allocation, which scales the number of executors registered with this application up and down based on the workload. For more detail, see the description here. This requires one of the following conditions: 1) enabling external shuffle service through spark.shuffle.service.enabled, or 2) enabling shuffle tracking through spark.dynamicAllocation.shuffleTracking.enabled, or 3) enabling shuffle blocks decommission through spark.decommission.enabled and spark.storage.decommission.shuffleBlocks.enabled, or 4) (Experimental) configuring spark.shuffle.sort.io.plugin.class to use a custom ShuffleDataIO who's ShuffleDriverComponents supports reliable storage. The following configurations are also relevant: spark.dynamicAllocation.minExecutors, spark.dynamicAllocation.maxExecutors, and spark.dynamicAllocation.initialExecutors spark.dynamicAllocation.executorAllocationRatio""",
1544
2385
  is_required=False,
1545
2386
  ),
1546
2387
  "executorIdleTimeout": Field(
@@ -1583,6 +2424,22 @@ def spark_config():
1583
2424
  description="""Dynamic Allocation: Same as spark.dynamicAllocation.schedulerBacklogTimeout, but used only for subsequent executor requests. For more detail, see this description.""",
1584
2425
  is_required=False,
1585
2426
  ),
2427
+ "shuffleTracking": Field(
2428
+ Permissive(
2429
+ fields={
2430
+ "enabled": Field(
2431
+ StringSource,
2432
+ description="""Dynamic Allocation: Enables shuffle file tracking for executors, which allows dynamic allocation without the need for an external shuffle service. This option will try to keep alive executors that are storing shuffle data for active jobs.""",
2433
+ is_required=False,
2434
+ ),
2435
+ "timeout": Field(
2436
+ StringSource,
2437
+ description="""Dynamic Allocation: When shuffle tracking is enabled, controls the timeout for executors that are holding shuffle data. The default value means that Spark will rely on the shuffles being garbage collected to be able to release executors. If for some reason garbage collection is not cleaning up shuffles quickly enough, this option can be used to control when to time out executors even when they are storing shuffle data.""",
2438
+ is_required=False,
2439
+ ),
2440
+ }
2441
+ )
2442
+ ),
1586
2443
  }
1587
2444
  )
1588
2445
  ),
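The spark.dynamicAllocation.shuffleTracking.* fields added at the end of this hunk are one of the ways to satisfy the relaxed requirements in the updated spark.dynamicAllocation.enabled description (no external shuffle service needed). A closing sketch, with placeholder bounds taken from the configs the description itself names:

# Illustrative: dynamic allocation driven by shuffle tracking instead of an external shuffle service.
dynamic_allocation_conf = {
    "spark": {
        "dynamicAllocation": {
            "enabled": "true",
            "minExecutors": "1",
            "maxExecutors": "20",
            "shuffleTracking": {
                "enabled": "true",   # spark.dynamicAllocation.shuffleTracking.enabled
                "timeout": "30min",  # spark.dynamicAllocation.shuffleTracking.timeout
            },
        }
    }
}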