Fokko commented on PR #11782:
URL: https://github.com/apache/iceberg/pull/11782#issuecomment-2558680432

   Thanks for working on this @tom-s-powell. It turned out that I was running into the same issue as you; see https://github.com/apache/iceberg/pull/11858.
   
   While testing your solution, I noticed that my logs are flooded with:
   
   ```
   24/12/23 00:23:14 WARN S3FileIO: Unclosed S3FileIO instance created by:
        org.apache.iceberg.aws.s3.S3FileIO.initialize(S3FileIO.java:371)
        org.apache.iceberg.CatalogUtil.loadFileIO(CatalogUtil.java:369)
        org.apache.iceberg.hive.HiveCatalog.lambda$initialize$1(HiveCatalog.java:126)
        org.apache.iceberg.hive.HiveCatalog.newViewOps(HiveCatalog.java:678)
        org.apache.iceberg.view.BaseMetastoreViewCatalog.loadView(BaseMetastoreViewCatalog.java:53)
        org.apache.iceberg.catalog.ViewCatalog.viewExists(ViewCatalog.java:65)
        org.apache.iceberg.hive.HiveCatalog$ViewAwareTableBuilder.createOrReplaceTransaction(HiveCatalog.java:837)
        org.apache.iceberg.CachingCatalog$CachingTableBuilder.createOrReplaceTransaction(CachingCatalog.java:300)
        org.apache.iceberg.spark.SparkCatalog.stageCreateOrReplace(SparkCatalog.java:301)
        org.apache.spark.sql.connector.catalog.StagingTableCatalog.stageCreateOrReplace(StagingTableCatalog.java:191)
        org.apache.spark.sql.execution.datasources.v2.AtomicReplaceTableAsSelectExec.run(WriteToDataSourceV2Exec.scala:207)
        org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result$lzycompute(V2CommandExec.scala:43)
        org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result(V2CommandExec.scala:43)
        org.apache.spark.sql.execution.datasources.v2.V2CommandExec.executeCollect(V2CommandExec.scala:49)
        org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:107)
        org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:125)
        org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:201)
        org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:108)
        org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
        org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:66)
        org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:107)
        org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:98)
        org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:461)
        org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:76)
        org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:461)
        org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:32)
        org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
        org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
        org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:32)
        org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:32)
        org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:437)
        org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:98)
        org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:85)
        org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:83)
        org.apache.spark.sql.Dataset.<init>(Dataset.scala:220)
        org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
        org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
        org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:97)
        org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:638)
        org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
        org.apache.spark.sql.SparkSession.sql(SparkSession.scala:629)
        org.apache.spark.sql.SparkSession.sql(SparkSession.scala:659)
        jdk.internal.reflect.GeneratedMethodAccessor95.invoke(Unknown Source)
        java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        java.base/java.lang.reflect.Method.invoke(Method.java:566)
        py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
        py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
        py4j.Gateway.invoke(Gateway.java:282)
        py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
        py4j.commands.CallCommand.execute(CallCommand.java:79)
        py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
        py4j.ClientServerConnection.run(ClientServerConnection.java:106)
        java.base/java.lang.Thread.run(Thread.java:829)
   24/12/23 00:23:15 WARN S3FileIO: Unclosed S3FileIO instance created by:
        org.apache.iceberg.aws.s3.S3FileIO.initialize(S3FileIO.java:371)
        org.apache.iceberg.CatalogUtil.loadFileIO(CatalogUtil.java:369)
        org.apache.iceberg.hive.HiveCatalog.lambda$initialize$1(HiveCatalog.java:126)
        org.apache.iceberg.hive.HiveCatalog.newViewOps(HiveCatalog.java:678)
        org.apache.iceberg.view.BaseMetastoreViewCatalog.loadView(BaseMetastoreViewCatalog.java:53)
        org.apache.iceberg.catalog.ViewCatalog.viewExists(ViewCatalog.java:65)
        org.apache.iceberg.hive.HiveCatalog$ViewAwareTableBuilder.createOrReplaceTransaction(HiveCatalog.java:837)
        org.apache.iceberg.CachingCatalog$CachingTableBuilder.createOrReplaceTransaction(CachingCatalog.java:300)
        org.apache.iceberg.spark.SparkCatalog.stageCreateOrReplace(SparkCatalog.java:301)
        org.apache.spark.sql.connector.catalog.StagingTableCatalog.stageCreateOrReplace(StagingTableCatalog.java:191)
        org.apache.spark.sql.execution.datasources.v2.AtomicReplaceTableAsSelectExec.run(WriteToDataSourceV2Exec.scala:207)
        org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result$lzycompute(V2CommandExec.scala:43)
        org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result(V2CommandExec.scala:43)
        org.apache.spark.sql.execution.datasources.v2.V2CommandExec.executeCollect(V2CommandExec.scala:49)
        org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:107)
        org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:125)
        org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:201)
        org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:108)
        org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
        org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:66)
        org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:107)
        org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:98)
        org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:461)
        org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:76)
        org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:461)
        org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:32)
        org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
        org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
        org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:32)
        org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:32)
        org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:437)
        org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:98)
        org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:85)
        org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:83)
        org.apache.spark.sql.Dataset.<init>(Dataset.scala:220)
        org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
        org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
        org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:97)
        org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:638)
        org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
        org.apache.spark.sql.SparkSession.sql(SparkSession.scala:629)
        org.apache.spark.sql.SparkSession.sql(SparkSession.scala:659)
        jdk.internal.reflect.GeneratedMethodAccessor95.invoke(Unknown Source)
        java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        java.base/java.lang.reflect.Method.invoke(Method.java:566)
        py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
        py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
        py4j.Gateway.invoke(Gateway.java:282)
        py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
        py4j.commands.CallCommand.execute(CallCommand.java:79)
        py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
        py4j.ClientServerConnection.run(ClientServerConnection.java:106)
        java.base/java.lang.Thread.run(Thread.java:829)
   24/12/23 00:23:15 WARN S3FileIO: Unclosed S3FileIO instance created by:
        org.apache.iceberg.aws.s3.S3FileIO.initialize(S3FileIO.java:371)
        org.apache.iceberg.CatalogUtil.loadFileIO(CatalogUtil.java:369)
        org.apache.iceberg.hive.HiveCatalog.lambda$initialize$1(HiveCatalog.java:126)
        org.apache.iceberg.hive.HiveCatalog.newViewOps(HiveCatalog.java:678)
        org.apache.iceberg.view.BaseMetastoreViewCatalog.loadView(BaseMetastoreViewCatalog.java:53)
        org.apache.iceberg.catalog.ViewCatalog.viewExists(ViewCatalog.java:65)
        org.apache.iceberg.hive.HiveCatalog$ViewAwareTableBuilder.createOrReplaceTransaction(HiveCatalog.java:837)
        org.apache.iceberg.CachingCatalog$CachingTableBuilder.createOrReplaceTransaction(CachingCatalog.java:300)
        org.apache.iceberg.spark.SparkCatalog.stageCreateOrReplace(SparkCatalog.java:301)
        org.apache.spark.sql.connector.catalog.StagingTableCatalog.stageCreateOrReplace(StagingTableCatalog.java:191)
        org.apache.spark.sql.execution.datasources.v2.AtomicReplaceTableExec.run(ReplaceTableExec.scala:75)
        org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result$lzycompute(V2CommandExec.scala:43)
        org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result(V2CommandExec.scala:43)
        org.apache.spark.sql.execution.datasources.v2.V2CommandExec.executeCollect(V2CommandExec.scala:49)
        org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:107)
        org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:125)
        org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:201)
        org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:108)
        org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
        org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:66)
        org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:107)
        org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:98)
        org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:461)
        org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:76)
        org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:461)
        org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:32)
        org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
        org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
        org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:32)
        org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:32)
        org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:437)
        org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:98)
        org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:85)
        org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:83)
        org.apache.spark.sql.Dataset.<init>(Dataset.scala:220)
        org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
        org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
        org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:97)
        org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:638)
        org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:900)
        org.apache.spark.sql.SparkSession.sql(SparkSession.scala:629)
        org.apache.spark.sql.SparkSession.sql(SparkSession.scala:659)
        jdk.internal.reflect.GeneratedMethodAccessor95.invoke(Unknown Source)
        java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        java.base/java.lang.reflect.Method.invoke(Method.java:566)
        py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
        py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:374)
        py4j.Gateway.invoke(Gateway.java:282)
        py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
        py4j.commands.CallCommand.execute(CallCommand.java:79)
        py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
        py4j.ClientServerConnection.run(ClientServerConnection.java:106)
        java.base/java.lang.Thread.run(Thread.java:829)
   ```
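
   For anyone puzzled by the warning itself: `S3FileIO` captures the creation stack in `initialize(...)` and logs it if the instance is finalized without `close()` ever being called, so each block above is one leaked `FileIO`. If I read the trace correctly, it is the per-view `FileIO` loaded via `HiveCatalog.newViewOps` that never gets closed. Here is a minimal sketch of the leaking pattern, in case it helps with testing; the class name and loop are hypothetical, only the `CatalogUtil.loadFileIO` call mirrors the trace above:

   ```java
   import java.util.Map;

   import org.apache.iceberg.CatalogUtil;
   import org.apache.iceberg.io.FileIO;

   // Hypothetical sketch of the leak pattern in the trace above: a fresh FileIO
   // is loaded for each check and the reference is dropped without close().
   public class FileIoLeakSketch {
     public static void main(String[] args) throws InterruptedException {
       for (int i = 0; i < 3; i++) {
         // Needs iceberg-aws and the AWS SDK on the classpath.
         FileIO io =
             CatalogUtil.loadFileIO("org.apache.iceberg.aws.s3.S3FileIO", Map.of(), null);
         // ... pretend to do a metadata lookup with io ...
         // Missing io.close(): once the abandoned instance is finalized after a
         // GC, S3FileIO logs "Unclosed S3FileIO instance created by:" together
         // with the creation stack it captured in initialize().
       }
       System.gc(); // nudge finalization so the warnings show up in a short run
       Thread.sleep(1000);
     }
   }
   ```

   Closing the loaded `FileIO` when its owner is done with it (try-with-resources, or the owning catalog's `close()`) is what silences the warning.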
   
   Did you try your fix in the meantime? (I must admit it is not very easy to test.)


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
