jackye1995 commented on code in PR #6880: URL: https://github.com/apache/iceberg/pull/6880#discussion_r1119386521
########## delta-lake/src/integration/java/org/apache/iceberg/delta/TestSnapshotDeltaLakeTable.java: ########## @@ -121,91 +117,28 @@ public TestSnapshotDeltaLakeTable( } @Before - public void before() throws IOException { - File partitionedFolder = temp1.newFolder(); - File unpartitionedFolder = temp2.newFolder(); - File newIcebergTableFolder = temp3.newFolder(); - File externalDataFilesTableFolder = temp4.newFolder(); - File typeTestTableFolder = temp5.newFolder(); - partitionedLocation = partitionedFolder.toURI().toString(); - unpartitionedLocation = unpartitionedFolder.toURI().toString(); - newIcebergTableLocation = newIcebergTableFolder.toURI().toString(); - externalDataFilesTableLocation = externalDataFilesTableFolder.toURI().toString(); - typeTestTableLocation = typeTestTableFolder.toURI().toString(); - + public void before() { spark.sql(String.format("CREATE DATABASE IF NOT EXISTS %s", NAMESPACE)); - partitionedIdentifier = destName(defaultSparkCatalog, partitionedTableName); - unpartitionedIdentifier = destName(defaultSparkCatalog, unpartitionedTableName); - externalDataFilesIdentifier = destName(defaultSparkCatalog, externalDataFilesTableName); - typeTestIdentifier = destName(defaultSparkCatalog, typeTestTableName); - - spark.sql(String.format("DROP TABLE IF EXISTS %s", partitionedIdentifier)); - spark.sql(String.format("DROP TABLE IF EXISTS %s", unpartitionedIdentifier)); - spark.sql(String.format("DROP TABLE IF EXISTS %s", externalDataFilesIdentifier)); - spark.sql(String.format("DROP TABLE IF EXISTS %s", typeTestIdentifier)); - // generate the dataframe nestedDataFrame = nestedDataFrame(); typeTestDataFrame = typeTestDataFrame(); - - // write to delta tables - writeDeltaTable(nestedDataFrame, partitionedIdentifier, partitionedLocation, "id"); - writeDeltaTable(nestedDataFrame, unpartitionedIdentifier, unpartitionedLocation, null); - - // Delete a record from the table - spark.sql("DELETE FROM " + partitionedIdentifier + " WHERE id=3"); - 
spark.sql("DELETE FROM " + unpartitionedIdentifier + " WHERE id=3"); - - // Update a record - spark.sql("UPDATE " + partitionedIdentifier + " SET id=3 WHERE id=1"); - spark.sql("UPDATE " + unpartitionedIdentifier + " SET id=3 WHERE id=1"); } @After public void after() { - // Drop delta lake tables. - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", destName(defaultSparkCatalog, partitionedTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", destName(defaultSparkCatalog, unpartitionedTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", destName(defaultSparkCatalog, externalDataFilesTableName))); - spark.sql( - String.format("DROP TABLE IF EXISTS %s", destName(defaultSparkCatalog, typeTestTableName))); - - // Drop iceberg tables. - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", destName(icebergCatalogName, snapshotPartitionedTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", - destName(icebergCatalogName, snapshotUnpartitionedTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", - destName(icebergCatalogName, snapshotExternalDataFilesTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", - destName(icebergCatalogName, snapshotNewTableLocationTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", - destName(icebergCatalogName, snapshotAdditionalPropertiesTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", destName(icebergCatalogName, snapshotTypeTestTableName))); - - spark.sql(String.format("DROP DATABASE IF EXISTS %s", NAMESPACE)); + spark.sql(String.format("DROP DATABASE IF EXISTS %s CASCADE", NAMESPACE)); Review Comment: this should be in @AfterClass hook instead of @After ########## delta-lake/src/integration/java/org/apache/iceberg/delta/TestSnapshotDeltaLakeTable.java: ########## @@ -121,91 +117,28 @@ public TestSnapshotDeltaLakeTable( } @Before - public void before() throws IOException { - File 
partitionedFolder = temp1.newFolder(); - File unpartitionedFolder = temp2.newFolder(); - File newIcebergTableFolder = temp3.newFolder(); - File externalDataFilesTableFolder = temp4.newFolder(); - File typeTestTableFolder = temp5.newFolder(); - partitionedLocation = partitionedFolder.toURI().toString(); - unpartitionedLocation = unpartitionedFolder.toURI().toString(); - newIcebergTableLocation = newIcebergTableFolder.toURI().toString(); - externalDataFilesTableLocation = externalDataFilesTableFolder.toURI().toString(); - typeTestTableLocation = typeTestTableFolder.toURI().toString(); - + public void before() { spark.sql(String.format("CREATE DATABASE IF NOT EXISTS %s", NAMESPACE)); - partitionedIdentifier = destName(defaultSparkCatalog, partitionedTableName); - unpartitionedIdentifier = destName(defaultSparkCatalog, unpartitionedTableName); - externalDataFilesIdentifier = destName(defaultSparkCatalog, externalDataFilesTableName); - typeTestIdentifier = destName(defaultSparkCatalog, typeTestTableName); - - spark.sql(String.format("DROP TABLE IF EXISTS %s", partitionedIdentifier)); - spark.sql(String.format("DROP TABLE IF EXISTS %s", unpartitionedIdentifier)); - spark.sql(String.format("DROP TABLE IF EXISTS %s", externalDataFilesIdentifier)); - spark.sql(String.format("DROP TABLE IF EXISTS %s", typeTestIdentifier)); - // generate the dataframe nestedDataFrame = nestedDataFrame(); typeTestDataFrame = typeTestDataFrame(); - - // write to delta tables - writeDeltaTable(nestedDataFrame, partitionedIdentifier, partitionedLocation, "id"); - writeDeltaTable(nestedDataFrame, unpartitionedIdentifier, unpartitionedLocation, null); - - // Delete a record from the table - spark.sql("DELETE FROM " + partitionedIdentifier + " WHERE id=3"); - spark.sql("DELETE FROM " + unpartitionedIdentifier + " WHERE id=3"); - - // Update a record - spark.sql("UPDATE " + partitionedIdentifier + " SET id=3 WHERE id=1"); - spark.sql("UPDATE " + unpartitionedIdentifier + " SET id=3 WHERE id=1"); } 
@After public void after() { - // Drop delta lake tables. - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", destName(defaultSparkCatalog, partitionedTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", destName(defaultSparkCatalog, unpartitionedTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", destName(defaultSparkCatalog, externalDataFilesTableName))); - spark.sql( - String.format("DROP TABLE IF EXISTS %s", destName(defaultSparkCatalog, typeTestTableName))); - - // Drop iceberg tables. - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", destName(icebergCatalogName, snapshotPartitionedTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", - destName(icebergCatalogName, snapshotUnpartitionedTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", - destName(icebergCatalogName, snapshotExternalDataFilesTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", - destName(icebergCatalogName, snapshotNewTableLocationTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", - destName(icebergCatalogName, snapshotAdditionalPropertiesTableName))); - spark.sql( - String.format( - "DROP TABLE IF EXISTS %s", destName(icebergCatalogName, snapshotTypeTestTableName))); - - spark.sql(String.format("DROP DATABASE IF EXISTS %s", NAMESPACE)); + spark.sql(String.format("DROP DATABASE IF EXISTS %s CASCADE", NAMESPACE)); Review Comment: this should be in `@AfterClass` hook instead of `@After ` -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org For additional commands, e-mail: issues-help@iceberg.apache.org