This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new c3a288281071 [SPARK-45215][SQL][TESTS] Combine HiveCatalogedDDLSuite and HiveDDLSuite
c3a288281071 is described below
commit c3a288281071c47e9f86f4fe7892875c2a6d364f
Author: panbingkun <[email protected]>
AuthorDate: Tue Sep 19 10:46:08 2023 -0700
[SPARK-45215][SQL][TESTS] Combine HiveCatalogedDDLSuite and HiveDDLSuite
### What changes were proposed in this pull request?
This PR aims to combine `HiveCatalogedDDLSuite` and `HiveDDLSuite` into a single test suite.
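For reference, a minimal sketch of the merged suite declaration, taken from the diff below (the former `HiveCatalogedDDLSuite` body, which extended `DDLSuite`, and the former `HiveDDLSuite` body, which extended `QueryTest`, now live in one class):

```scala
@SlowHiveTest
class HiveDDLSuite
  extends DDLSuite with SQLTestUtils with TestHiveSingleton with BeforeAndAfterEach {
  import testImplicits._

  val hiveFormats = Seq("PARQUET", "ORC", "TEXTFILE", "SEQUENCEFILE", "RCFILE", "AVRO")

  override def afterEach(): Unit = {
    try {
      // drop all databases, tables and functions after each test
      spark.sessionState.catalog.reset()
    } finally {
      super.afterEach()
    }
  }

  // ... all tests from both former suites ...
}
```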
### Why are the changes needed?
- Complete the long-standing cleanup noted in the `// TODO(gatorsmile)` comment and make the code clearer.
- Consolidate Hive's DDL test logic into a single test suite.
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
- Manual testing (see the example command after this list).
- Passed GA (GitHub Actions).
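A minimal sketch of how the combined suite can be run locally (assuming a Spark source checkout; the exact sbt invocation and profile flags may differ in your environment):

```
build/sbt -Phive "hive/testOnly org.apache.spark.sql.hive.execution.HiveDDLSuite"
```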
### Was this patch authored or co-authored using generative AI tooling?
No.
Closes #42992 from panbingkun/combine_HiveCatalogedDDLSuite_HiveDDLSuite.
Authored-by: panbingkun <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
---
.../spark/sql/hive/execution/HiveDDLSuite.scala | 54 +++++++++-------------
1 file changed, 21 insertions(+), 33 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
index 7a4a339e9370..99250ed6a91b 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
@@ -26,7 +26,7 @@ import org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.{SparkException, SparkUnsupportedOperationException}
-import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode}
+import org.apache.spark.sql.{AnalysisException, Row, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
import org.apache.spark.sql.catalyst.catalog._
@@ -49,9 +49,12 @@ import org.apache.spark.sql.types._
import org.apache.spark.tags.SlowHiveTest
import org.apache.spark.util.Utils
-// TODO(gatorsmile): combine HiveCatalogedDDLSuite and HiveDDLSuite
@SlowHiveTest
-class HiveCatalogedDDLSuite extends DDLSuite with TestHiveSingleton with BeforeAndAfterEach {
+class HiveDDLSuite
+  extends DDLSuite with SQLTestUtils with TestHiveSingleton with BeforeAndAfterEach {
+ import testImplicits._
+  val hiveFormats = Seq("PARQUET", "ORC", "TEXTFILE", "SEQUENCEFILE", "RCFILE", "AVRO")
+
override def afterEach(): Unit = {
try {
// drop all databases, tables and functions after each test
@@ -135,6 +138,21 @@ class HiveCatalogedDDLSuite extends DDLSuite with TestHiveSingleton with BeforeA
)
}
+ // check if the directory for recording the data of the table exists.
+ private def tableDirectoryExists(
+ tableIdentifier: TableIdentifier,
+ dbPath: Option[String] = None): Boolean = {
+ val expectedTablePath =
+ if (dbPath.isEmpty) {
+ hiveContext.sessionState.catalog.defaultTablePath(tableIdentifier)
+ } else {
+ new Path(new Path(dbPath.get), tableIdentifier.table).toUri
+ }
+ val filesystemPath = new Path(expectedTablePath.toString)
+ val fs = filesystemPath.getFileSystem(spark.sessionState.newHadoopConf())
+ fs.exists(filesystemPath)
+ }
+
test("alter table: set properties") {
testSetProperties(isDatasourceTable = false)
}
@@ -372,36 +390,6 @@ class HiveCatalogedDDLSuite extends DDLSuite with TestHiveSingleton with BeforeA
catalog.reset()
}
}
-}
-
-@SlowHiveTest
-class HiveDDLSuite
-  extends QueryTest with SQLTestUtils with TestHiveSingleton with BeforeAndAfterEach {
- import testImplicits._
-  val hiveFormats = Seq("PARQUET", "ORC", "TEXTFILE", "SEQUENCEFILE", "RCFILE", "AVRO")
-
- override def afterEach(): Unit = {
- try {
- // drop all databases, tables and functions after each test
- spark.sessionState.catalog.reset()
- } finally {
- super.afterEach()
- }
- }
- // check if the directory for recording the data of the table exists.
- private def tableDirectoryExists(
- tableIdentifier: TableIdentifier,
- dbPath: Option[String] = None): Boolean = {
- val expectedTablePath =
- if (dbPath.isEmpty) {
- hiveContext.sessionState.catalog.defaultTablePath(tableIdentifier)
- } else {
- new Path(new Path(dbPath.get), tableIdentifier.table).toUri
- }
- val filesystemPath = new Path(expectedTablePath.toString)
- val fs = filesystemPath.getFileSystem(spark.sessionState.newHadoopConf())
- fs.exists(filesystemPath)
- }
test("drop tables") {
withTable("tab1") {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]