@@ -324,11 +324,11 @@ object SQLConf {
     .doc("Configures the maximum size in bytes for a table that will be broadcast to all worker " +
       "nodes when performing a join. By setting this value to -1 broadcasting can be disabled. " +
       "Note that currently statistics are only supported for Hive Metastore tables where the " +
-      "command <code>ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan</code> has been " +
+      "command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been " +
       "run, and file-based data source tables where the statistics are computed directly on " +
       "the files of data.")
     .bytesConf(ByteUnit.BYTE)
-    .createWithDefault(10L * 1024 * 1024)
+    .createWithDefaultString("10MB")
 
   val LIMIT_SCALE_UP_FACTOR = buildConf("spark.sql.limit.scaleUpFactor")
     .internal()
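A quick sanity check (not part of the commit) that the new string default resolves to the same byte count as the old numeric one. The only assumption is that `bytesConf(ByteUnit.BYTE)` parses size strings with Spark's `JavaUtils` byte-string parser, the same parser used for user-supplied values of this config:

```scala
import org.apache.spark.network.util.JavaUtils

// Old default: an explicit byte count.
val oldDefault = 10L * 1024 * 1024                    // 10485760
// New default: "10MB" parsed the same way size strings are parsed at lookup time.
val newDefault = JavaUtils.byteStringAsBytes("10MB")  // 10485760
assert(oldDefault == newDefault)
```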
@@ -402,7 +402,7 @@ object SQLConf {
       s"an effect when '${ADAPTIVE_EXECUTION_ENABLED.key}' and " +
       s"'${REDUCE_POST_SHUFFLE_PARTITIONS_ENABLED.key}' is enabled.")
     .bytesConf(ByteUnit.BYTE)
-    .createWithDefault(64 * 1024 * 1024)
+    .createWithDefaultString("64MB")
 
   val SHUFFLE_MAX_NUM_POSTSHUFFLE_PARTITIONS =
     buildConf("spark.sql.adaptive.shuffle.maxNumPostShufflePartitions")
@@ -436,7 +436,7 @@ object SQLConf {
     .doc("Configures the minimum size in bytes for a partition that is considered as a skewed " +
       "partition in adaptive skewed join.")
     .bytesConf(ByteUnit.BYTE)
-    .createWithDefault(64 * 1024 * 1024)
+    .createWithDefaultString("64MB")
 
   val ADAPTIVE_EXECUTION_SKEWED_PARTITION_FACTOR =
     buildConf("spark.sql.adaptive.optimizeSkewedJoin.skewedPartitionFactor")
@@ -770,7 +770,7 @@ object SQLConf {
   val BROADCAST_TIMEOUT = buildConf("spark.sql.broadcastTimeout")
     .doc("Timeout in seconds for the broadcast wait time in broadcast joins.")
     .timeConf(TimeUnit.SECONDS)
-    .createWithDefault(5 * 60)
+    .createWithDefaultString(s"${5 * 60}")
 
   // This is only used for the thriftserver
   val THRIFTSERVER_POOL = buildConf("spark.sql.thriftserver.scheduler.pool")
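The timeout default changes shape the same way: `s"${5 * 60}"` is just the string "300", and a `timeConf(TimeUnit.SECONDS)` interprets a bare number in its declared unit, so the effective default stays at 300 seconds. A small sketch of that assumption using the `JavaUtils` time-string parser:

```scala
import java.util.concurrent.TimeUnit
import org.apache.spark.network.util.JavaUtils

// "300" carries no suffix, so it is read in the config's unit (seconds here).
val parsedSeconds = JavaUtils.timeStringAs(s"${5 * 60}", TimeUnit.SECONDS)
assert(parsedSeconds == 5 * 60)
```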
@@ -830,7 +830,7 @@ object SQLConf {
     .createWithDefault(true)
 
   val BUCKETING_MAX_BUCKETS = buildConf("spark.sql.sources.bucketing.maxBuckets")
-    .doc("The maximum number of buckets allowed. Defaults to 100000")
+    .doc("The maximum number of buckets allowed.")
     .intConf
     .checkValue(_ > 0, "the value of spark.sql.sources.bucketing.maxBuckets must be greater than 0")
     .createWithDefault(100000)
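For context, this is the cap that bucketed writes are validated against; the doc text no longer repeats the default because `.createWithDefault(100000)` already records it. A hedged usage sketch (the table name is made up and `spark` is an existing `SparkSession`):

```scala
// A bucketed write well under spark.sql.sources.bucketing.maxBuckets (default 100000);
// asking for more buckets than the cap makes the write fail validation.
spark.range(1000)
  .write
  .bucketBy(16, "id")
  .sortBy("id")
  .saveAsTable("bucketed_example")
```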
@@ -1022,7 +1022,7 @@ object SQLConf {
       "This configuration is effective only when using file-based sources such as Parquet, JSON " +
       "and ORC.")
     .bytesConf(ByteUnit.BYTE)
-    .createWithDefault(128 * 1024 * 1024) // parquet.block.size
+    .createWithDefaultString("128MB") // parquet.block.size
 
   val FILES_OPEN_COST_IN_BYTES = buildConf("spark.sql.files.openCostInBytes")
     .internal()
@@ -1161,7 +1161,8 @@ object SQLConf {
1161
1161
1162
1162
val VARIABLE_SUBSTITUTE_ENABLED =
1163
1163
buildConf(" spark.sql.variable.substitute" )
1164
- .doc(" This enables substitution using syntax like ${var} ${system:var} and ${env:var}." )
1164
+ .doc(" This enables substitution using syntax like `${var}`, `${system:var}`, " +
1165
+ " and `${env:var}`." )
1165
1166
.booleanConf
1166
1167
.createWithDefault(true )
1167
1168
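An illustrative use of the substitution syntax the reworded doc describes, assuming an existing `SparkSession` named `spark`; with `spark.sql.variable.substitute` enabled (the default), `${system:...}` and `${env:...}` references are expanded before the statement is parsed:

```scala
spark.conf.set("spark.sql.variable.substitute", "true")  // already the default
// ${system:user.name} is replaced with the JVM's user.name system property.
spark.sql("SELECT '${system:user.name}' AS submitted_by").show()
```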
@@ -1171,7 +1172,7 @@ object SQLConf {
     .doc("Enable two-level aggregate hash map. When enabled, records will first be " +
       "inserted/looked-up at a 1st-level, small, fast map, and then fallback to a " +
       "2nd-level, larger, slower map when 1st level is full or keys cannot be found. " +
-      "When disabled, records go directly to the 2nd level. Defaults to true.")
+      "When disabled, records go directly to the 2nd level.")
     .booleanConf
     .createWithDefault(true)
 
@@ -1325,10 +1326,10 @@ object SQLConf {
1325
1326
1326
1327
val STREAMING_STOP_TIMEOUT =
1327
1328
buildConf(" spark.sql.streaming.stopTimeout" )
1328
- .doc(" How long to wait for the streaming execution thread to stop when calling the " +
1329
- " streaming query's stop() method in milliseconds . 0 or negative values wait indefinitely." )
1329
+ .doc(" How long to wait in milliseconds for the streaming execution thread to stop when " +
1330
+ " calling the streaming query's stop() method. 0 or negative values wait indefinitely." )
1330
1331
.timeConf(TimeUnit .MILLISECONDS )
1331
- .createWithDefault( 0L )
1332
+ .createWithDefaultString( " 0 " )
1332
1333
1333
1334
val STREAMING_NO_DATA_PROGRESS_EVENT_INTERVAL =
1334
1335
buildConf(" spark.sql.streaming.noDataProgressEventInterval" )
@@ -1611,10 +1612,10 @@ object SQLConf {
1611
1612
val PANDAS_UDF_BUFFER_SIZE =
1612
1613
buildConf(" spark.sql.execution.pandas.udf.buffer.size" )
1613
1614
.doc(
1614
- s " Same as ${BUFFER_SIZE } but only applies to Pandas UDF executions. If it is not set, " +
1615
- s " the fallback is ${BUFFER_SIZE } . Note that Pandas execution requires more than 4 bytes. " +
1616
- " Lowering this value could make small Pandas UDF batch iterated and pipelined; however, " +
1617
- " it might degrade performance. See SPARK-27870." )
1615
+ s " Same as ` ${BUFFER_SIZE .key} ` but only applies to Pandas UDF executions. If it is not " +
1616
+ s " set, the fallback is ` ${BUFFER_SIZE .key} ` . Note that Pandas execution requires more " +
1617
+ " than 4 bytes. Lowering this value could make small Pandas UDF batch iterated and " +
1618
+ " pipelined; however, it might degrade performance. See SPARK-27870." )
1618
1619
.fallbackConf(BUFFER_SIZE )
1619
1620
1620
1621
val PANDAS_GROUPED_MAP_ASSIGN_COLUMNS_BY_NAME =
@@ -2039,7 +2040,7 @@ object SQLConf {
     .checkValue(i => i >= 0 && i <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH, "Invalid " +
       "value for 'spark.sql.maxPlanStringLength'. Length must be a valid string length " +
       "(nonnegative and shorter than the maximum size).")
-    .createWithDefault(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH)
+    .createWithDefaultString(s"${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}")
 
   val SET_COMMAND_REJECTS_SPARK_CORE_CONFS =
     buildConf("spark.sql.legacy.setCommandRejectsSparkCoreConfs")