Viewing the SQL parameter configuration of the current environment
spark.sql("SET -v")
| key | value |
| --- | --- |
| spark.sql.hive.version | 1.2.1 |
| spark.sql.sources.parallelPartitionDiscovery.threshold | 32 |
| spark.sql.hive.metastore.barrierPrefixes | |
| spark.sql.shuffle.partitions | 200 |
| spark.sql.hive.metastorePartitionPruning | false |
| spark.sql.broadcastTimeout | 300 |
| spark.sql.sources.bucketing.enabled | true |
| spark.sql.parquet.filterPushdown | true |
| spark.sql.statistics.fallBackToHdfs | false |
| spark.sql.adaptive.enabled | false |
| spark.sql.parquet.cacheMetadata | true |
| spark.sql.hive.metastore.sharedPrefixes | com.mysql.jdbc |
| spark.sql.parquet.respectSummaryFiles | false |
| spark.sql.warehouse.dir | hdfs:///user/spark/warehouse |
| spark.sql.orderByOrdinal | true |
| spark.sql.hive.convertMetastoreParquet | true |
| spark.sql.groupByOrdinal | true |
| spark.sql.hive.thriftServer.async | true |
| spark.sql.thriftserver.scheduler.pool | <undefined> |
| spark.sql.orc.filterPushdown | false |
| spark.sql.adaptive.shuffle.targetPostShuffleInputSize | 67108864b |
| spark.sql.sources.default | parquet |
| spark.sql.parquet.compression.codec | snappy |
| spark.sql.hive.metastore.version | 1.2.1 |
| spark.sql.sources.partitionDiscovery.enabled | true |
| spark.sql.crossJoin.enabled | false |
| spark.sql.parquet.writeLegacyFormat | false |
| spark.sql.hive.verifyPartitionPath | false |
| spark.sql.variable.substitute | true |
| spark.sql.thriftserver.ui.retainedStatements | 200 |
| spark.sql.hive.convertMetastoreParquet.mergeSchema | false |
| spark.sql.parquet.enableVectorizedReader | true |
| spark.sql.parquet.mergeSchema | false |
| spark.sql.parquet.binaryAsString | false |
| spark.sql.columnNameOfCorruptRecord | _corrupt_record |
| spark.sql.files.maxPartitionBytes | 134217728 |
| spark.sql.streaming.checkpointLocation | <undefined> |
| spark.sql.variable.substitute.depth | 40 |
| spark.sql.parquet.int96AsTimestamp | true |
| spark.sql.autoBroadcastJoinThreshold | 10485760 |
| spark.sql.pivotMaxValues | 10000 |
| spark.sql.sources.partitionColumnTypeInference.enabled | true |
| spark.sql.hive.metastore.jars | builtin |
| spark.sql.thriftserver.ui.retainedSessions | 200 |
| spark.sql.sources.maxConcurrentWrites | 1 |
| spark.sql.parquet.output.committer.class | org.apache.parquet.hadoop.ParquetOutputCommitter |
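Beyond listing everything, individual parameters can be read and changed at runtime through `spark.conf`. The following is a minimal sketch (Scala, Spark 2.x); the app name, master, and the chosen override values are illustrative and not taken from the listing above:

```scala
import org.apache.spark.sql.SparkSession

// Illustrative session setup; enableHiveSupport() requires Hive classes
// on the classpath and is what surfaces the spark.sql.hive.* settings above.
val spark = SparkSession.builder()
  .appName("sql-conf-demo")
  .master("local[*]")
  .enableHiveSupport()
  .getOrCreate()

// Narrow the SET -v listing to the keys you care about.
spark.sql("SET -v")
  .filter("key LIKE 'spark.sql.shuffle%'")
  .show(false)

// Read a single parameter, with a fallback if it is unset.
val partitions = spark.conf.get("spark.sql.shuffle.partitions", "200")

// Override runtime-mutable parameters for the current session only.
spark.conf.set("spark.sql.shuffle.partitions", "400")
spark.conf.set("spark.sql.autoBroadcastJoinThreshold", (50 * 1024 * 1024).toString)
```

Note that not every key in the table is mutable at runtime: static settings such as spark.sql.warehouse.dir are fixed once the session starts and must be supplied up front, for example via `--conf spark.sql.warehouse.dir=...` on spark-submit or on the SparkSession builder.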