gaoshihang opened a new issue, #9851:
URL: https://github.com/apache/iceberg/issues/9851

### Apache Iceberg version

1.4.1

### Query engine

Other

### Please describe the bug 🐞

I am trying to run Iceberg table compaction on Databricks, but I encountered this issue:
```
If you did not qualify the name with a schema, verify the current_schema() output, or qualify the name with the correct schema and catalog.
To tolerate the error on drop use DROP VIEW IF EXISTS or DROP TABLE IF EXISTS.
    at org.apache.iceberg.util.ExceptionUtil.castAndThrow(ExceptionUtil.java:39)
    at org.apache.iceberg.util.Tasks.throwOne(Tasks.java:595)
    at org.apache.iceberg.util.Tasks.access$100(Tasks.java:42)
    at org.apache.iceberg.util.Tasks$Builder.runParallel(Tasks.java:394)
    at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:201)
    at org.apache.iceberg.util.Tasks$Builder.run(Tasks.java:196)
    at org.apache.iceberg.spark.actions.RewriteDataFilesSparkAction.doExecute(RewriteDataFilesSparkAction.java:283)
    at org.apache.iceberg.spark.actions.RewriteDataFilesSparkAction.execute(RewriteDataFilesSparkAction.java:178)
    at org.apache.iceberg.spark.actions.RewriteDataFilesSparkAction.execute(RewriteDataFilesSparkAction.java:72)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$read$$iw$$iw$$iw$$iw$$iw$$iw.<init>(command-1532616048670819:32)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$read$$iw$$iw$$iw$$iw$$iw.<init>(command-1532616048670819:93)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$read$$iw$$iw$$iw$$iw.<init>(command-1532616048670819:95)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$read$$iw$$iw$$iw.<init>(command-1532616048670819:97)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$read$$iw$$iw.<init>(command-1532616048670819:99)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$read$$iw.<init>(command-1532616048670819:101)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$read.<init>(command-1532616048670819:103)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$read$.<init>(command-1532616048670819:107)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$read$.<clinit>(command-1532616048670819)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$eval$.$print$lzycompute(<notebook>:7)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$eval$.$print(<notebook>:6)
    at $line99c155946d9a44b8b055ab7d68cf3ffe41.$eval.$print(<notebook>)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at scala.tools.nsc.interpreter.IMain$ReadEvalPrint.call(IMain.scala:747)
    at scala.tools.nsc.interpreter.IMain$Request.loadAndRun(IMain.scala:1020)
    at scala.tools.nsc.interpreter.IMain.$anonfun$interpret$1(IMain.scala:568)
    at scala.reflect.internal.util.ScalaClassLoader.asContext(ScalaClassLoader.scala:36)
    at scala.reflect.internal.util.ScalaClassLoader.asContext$(ScalaClassLoader.scala:116)
    at scala.reflect.internal.util.AbstractFileClassLoader.asContext(AbstractFileClassLoader.scala:41)
    at scala.tools.nsc.interpreter.IMain.loadAndRunReq$1(IMain.scala:567)
    at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:594)
    at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:564)
    at com.databricks.backend.daemon.driver.DriverILoop.execute(DriverILoop.scala:223)
    at com.databricks.backend.daemon.driver.ScalaDriverLocal.$anonfun$repl$1(ScalaDriverLocal.scala:236)
    at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
    at com.databricks.backend.daemon.driver.DriverLocal$TrapExitInternal$.trapExit(DriverLocal.scala:1395)
    at com.databricks.backend.daemon.driver.DriverLocal$TrapExit$.apply(DriverLocal.scala:1348)
    at com.databricks.backend.daemon.driver.ScalaDriverLocal.repl(ScalaDriverLocal.scala:236)
    at com.databricks.backend.daemon.driver.DriverLocal.$anonfun$execute$33(DriverLocal.scala:997)
    at com.databricks.unity.UCSEphemeralState$Handle.runWith(UCSEphemeralState.scala:41)
    at com.databricks.unity.HandleImpl.runWith(UCSHandle.scala:99)
    at com.databricks.backend.daemon.driver.DriverLocal.$anonfun$execute$22(DriverLocal.scala:980)
    at com.databricks.logging.UsageLogging.$anonfun$withAttributionContext$1(UsageLogging.scala:426)
    at scala.util.DynamicVariable.withValue(DynamicVariable.scala:62)
    at com.databricks.logging.AttributionContext$.withValue(AttributionContext.scala:196)
    at com.databricks.logging.UsageLogging.withAttributionContext(UsageLogging.scala:424)
    at com.databricks.logging.UsageLogging.withAttributionContext$(UsageLogging.scala:418)
    at com.databricks.backend.daemon.driver.DriverLocal.withAttributionContext(DriverLocal.scala:69)
    at com.databricks.logging.UsageLogging.withAttributionTags(UsageLogging.scala:470)
    at com.databricks.logging.UsageLogging.withAttributionTags$(UsageLogging.scala:455)
    at com.databricks.backend.daemon.driver.DriverLocal.withAttributionTags(DriverLocal.scala:69)
    at com.databricks.backend.daemon.driver.DriverLocal.execute(DriverLocal.scala:935)
    at com.databricks.backend.daemon.driver.DriverWrapper.$anonfun$tryExecutingCommand$1(DriverWrapper.scala:798)
    at scala.util.Try$.apply(Try.scala:213)
    at com.databricks.backend.daemon.driver.DriverWrapper.tryExecutingCommand(DriverWrapper.scala:790)
    at com.databricks.backend.daemon.driver.DriverWrapper.executeCommandAndGetError(DriverWrapper.scala:643)
    at com.databricks.backend.daemon.driver.DriverWrapper.executeCommand(DriverWrapper.scala:744)
    at com.databricks.backend.daemon.driver.DriverWrapper.runInnerLoop(DriverWrapper.scala:520)
    at com.databricks.backend.daemon.driver.DriverWrapper.runInner(DriverWrapper.scala:436)
    at com.databricks.backend.daemon.driver.DriverWrapper.run(DriverWrapper.scala:279)
    at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.spark.sql.catalyst.analysis.NoSuchTableException: [TABLE_OR_VIEW_NOT_FOUND] The table or view `a69eeed1-7689-44af-a0d9-d77cd2c809d3` cannot be found. Verify the spelling and correctness of the schema and catalog.
If you did not qualify the name with a schema, verify the current_schema() output, or qualify the name with the correct schema and catalog.
To tolerate the error on drop use DROP VIEW IF EXISTS or DROP TABLE IF EXISTS.
    at org.apache.iceberg.spark.SparkCatalog.loadTable(SparkCatalog.java:161)
    at org.apache.spark.sql.connector.catalog.CatalogV2Util$.getTable(CatalogV2Util.scala:367)
    at org.apache.spark.sql.execution.datasources.v2.DataSourceV2Utils$.loadV2Source(DataSourceV2Utils.scala:135)
    at org.apache.spark.sql.DataFrameReader.$anonfun$load$1(DataFrameReader.scala:333)
    at scala.Option.flatMap(Option.scala:271)
    at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:331)
    at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:240)
    at org.apache.iceberg.spark.actions.SparkBinPackDataRewriter.doRewrite(SparkBinPackDataRewriter.java:52)
    at org.apache.iceberg.spark.actions.SparkSizeBasedDataRewriter.rewrite(SparkSizeBasedDataRewriter.java:58)
    at org.apache.iceberg.spark.actions.RewriteDataFilesSparkAction.lambda$rewriteFiles$0(RewriteDataFilesSparkAction.java:243)
    at org.apache.iceberg.spark.JobGroupUtils.withJobGroupInfo(JobGroupUtils.java:59)
    at org.apache.iceberg.spark.JobGroupUtils.withJobGroupInfo(JobGroupUtils.java:51)
    at org.apache.iceberg.spark.actions.BaseSparkAction.withJobGroupInfo(BaseSparkAction.java:132)
    at org.apache.iceberg.spark.actions.RewriteDataFilesSparkAction.rewriteFiles(RewriteDataFilesSparkAction.java:241)
    at org.apache.iceberg.spark.actions.RewriteDataFilesSparkAction.lambda$doExecute$2(RewriteDataFilesSparkAction.java:285)
    at org.apache.iceberg.util.Tasks$Builder.runTaskWithRetry(Tasks.java:413)
    at org.apache.iceberg.util.Tasks$Builder.access$300(Tasks.java:69)
    at org.apache.iceberg.util.Tasks$Builder$1.run(Tasks.java:315)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:750)
```
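
For context, the `Caused by` frames show the rewrite action reading each staged file group back through the Spark session catalog, using the group's UUID (`a69eeed1-7689-44af-a0d9-d77cd2c809d3`) as the table identifier. A rough Scala sketch of that read path, paraphrased from `SparkBinPackDataRewriter.doRewrite` in 1.4.x (details approximate, not the exact source):

```
import org.apache.iceberg.spark.SparkReadOptions

// Each file group is registered under a generated UUID and then read back
// through the session catalog, so the session's default catalog must be
// able to resolve that identifier.
val groupId = "a69eeed1-7689-44af-a0d9-d77cd2c809d3" // UUID from the error above
val scanDF = spark.read
  .format("iceberg")
  .option(SparkReadOptions.SCAN_TASK_SET_ID, groupId)
  .load(groupId) // resolved via SparkCatalog.loadTable, the frame that throws
```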
   
The code I used is:
```
import com.google.common.collect.ImmutableMap
import org.apache.hadoop.conf.Configuration
import org.apache.iceberg.actions.RewriteDataFiles
import org.apache.iceberg.catalog.TableIdentifier
import org.apache.iceberg.hadoop.HadoopCatalog
import org.apache.iceberg.spark.actions.SparkActions
import org.apache.spark.sql.SparkSession
import org.apache.iceberg.spark.Spark3Util

val spark = SparkSession.builder().appName("test_iceberg")
  .config("spark.sql.extensions", "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions")
  .config("spark.sql.defaultCatalog", "spark_catalog")
  .config("spark.sql.catalog.spark_catalog", "org.apache.iceberg.spark.SparkCatalog")
  .config("spark.sql.catalog.spark_catalog.type", "hadoop")
  .config("spark.sql.catalog.spark_catalog.warehouse", "s3://identity-dev-databricks-unity-catalog-root-bucket-us-east-1/hoover_data/")
  .config("spark.sql.catalog.spark_catalog.cache-enabled", "false")
  .getOrCreate()

val hadoopCatalog = new HadoopCatalog()
hadoopCatalog.setConf(new Configuration())
hadoopCatalog.initialize("spark_catalog", ImmutableMap.of(
  "warehouse", "s3a://identity-dev-databricks-unity-catalog-root-bucket-us-east-1/hoover_data",
  "io-impl", "org.apache.iceberg.aws.s3.S3FileIO"
))
val table = hadoopCatalog.loadTable(TableIdentifier.parse("db.hoover_ack_5min_pruning"))

SparkActions
  .get(spark)
  .rewriteDataFiles(table)
  .option(RewriteDataFiles.TARGET_FILE_SIZE_BYTES, (512 * 1024 * 1024).toString)
  .execute()

spark.stop()
```
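
As a side note, `Spark3Util` is imported above but never used. A variant that loads the same table through the configured session catalog instead of a standalone `HadoopCatalog` would look roughly like this (a sketch assuming the same catalog configuration and table name; not a confirmed workaround):

```
// Sketch: resolve the table through the session's spark_catalog so the
// rewrite action and the session agree on which catalog owns the table.
val sessionTable = Spark3Util.loadIcebergTable(spark, "spark_catalog.db.hoover_ack_5min_pruning")

SparkActions
  .get(spark)
  .rewriteDataFiles(sessionTable)
  .option(RewriteDataFiles.TARGET_FILE_SIZE_BYTES, (512 * 1024 * 1024).toString)
  .execute()
```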
   

