BsoBird commented on code in PR #9546:
URL: https://github.com/apache/iceberg/pull/9546#discussion_r1528260699
##########
core/src/main/java/org/apache/iceberg/hadoop/HadoopTableOperations.java:
##########

@@ -149,26 +183,71 @@ public void commit(TableMetadata base, TableMetadata metadata) {
     String codecName =
         metadata.property(
            TableProperties.METADATA_COMPRESSION, TableProperties.METADATA_COMPRESSION_DEFAULT);
+    // TODO: This is not compatible with the scenario where the user arbitrarily changes the
+    // metadata file compression codec. We can inform users about this bug first and fix it
+    // later. (Do not change the compression codec after the table is created.)
     TableMetadataParser.Codec codec = TableMetadataParser.Codec.fromName(codecName);
     String fileExtension = TableMetadataParser.getFileExtension(codec);
-    Path tempMetadataFile = metadataPath(UUID.randomUUID().toString() + fileExtension);
+    Path tempMetadataFile = metadataPath(UUID.randomUUID() + fileExtension);
     TableMetadataParser.write(metadata, io().newOutputFile(tempMetadataFile.toString()));

     int nextVersion = (current.first() != null ? current.first() : 0) + 1;
     Path finalMetadataFile = metadataFilePath(nextVersion, codec);
     FileSystem fs = getFileSystem(tempMetadataFile, conf);
-
-    // this rename operation is the atomic commit operation
-    renameToFinal(fs, tempMetadataFile, finalMetadataFile, nextVersion);
-
-    LOG.info("Committed a new metadata file {}", finalMetadataFile);
-
-    // update the best-effort version pointer
-    writeVersionHint(nextVersion);
-
-    deleteRemovedMetadataFiles(base, metadata);
-
-    this.shouldRefresh = true;
+    boolean versionCommitSuccess = false;
+    try {
+      deleteOldVersionHint(fs, versionHintFile(), nextVersion);
+      versionCommitSuccess = commitNewVersion(fs, tempMetadataFile, finalMetadataFile, nextVersion);
+      if (!versionCommitSuccess) {
+        // Users should clean up orphaned files after a failed job.
+        // This may be too heavy, but it can stay that way for now.
+        String msg =
+            String.format(
+                "Cannot write versionHint. commitVersion = %s. Is there a problem with the file system?",
+                nextVersion);
+        throw new RuntimeException(msg);
+      } else {
+        this.shouldRefresh = versionCommitSuccess;
+        // We don't really care whether the metadata cleanup/update succeeds here; if it fails
+        // this time, it can run again on the next commit() call. So fix the shouldRefresh
+        // flag first.
+        if (this.firstRun) {
+          this.firstRun = false;
+        }
+        LOG.info("Committed a new metadata file {}", finalMetadataFile);
+        // update the best-effort version pointer
+        boolean writeVersionHintSuccess = writeVersionHint(fs, nextVersion);
+        if (!writeVersionHintSuccess) {
+          LOG.warn(
+              "Failed to write a new versionHintFile, commit version is [{}]; is there a problem with the file system?",
+              nextVersion);
+        }
+        deleteRemovedMetadataFiles(base, metadata);
+      }
+    } catch (CommitStateUnknownException | CommitFailedException e) {

Review Comment:
   @nastra If I catch CleanableFailure, I have to change the exception-handling logic of the commit method and re-test it. Since I've already made quite a few changes in this version, I don't want to expand the scope of this PR any further. This PR is the basis for fixing the commit logic of the Hadoop table, and I think other adaptation logic should be introduced on top of that fixed foundation, step by step.
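   For reference, a minimal sketch of the alternative under discussion: cleaning up only on failures that implement Iceberg's CleanableFailure marker interface (implemented by exceptions such as CommitFailedException, which signal that the commit definitely failed), while leaving files alone on CommitStateUnknownException, which does not implement it because the commit outcome is unknown. The helper class and method below are hypothetical illustrations, not part of this PR:

   import org.apache.iceberg.exceptions.CleanableFailure;
   import org.apache.iceberg.io.FileIO;

   class CommitCleanupSketch {

     // Hypothetical helper: delete the temporary metadata file only when the
     // failure is known to be clean. Exceptions that do not implement
     // CleanableFailure (e.g. CommitStateUnknownException) may mean the commit
     // actually succeeded, so the file is left in place and the exception is
     // simply rethrown.
     static void cleanupOnFailure(RuntimeException e, FileIO io, String tempMetadataLocation) {
       if (e instanceof CleanableFailure) {
         io.deleteFile(tempMetadataLocation);
       }
       throw e;
     }
   }

   Adopting this would change which exceptions the catch block above handles, which is exactly the re-testing burden described in the comment.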
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For queries about this service, please contact Infrastructure at: us...@infra.apache.org