stevenzwu commented on code in PR #10179:
URL: https://github.com/apache/iceberg/pull/10179#discussion_r1624900416


##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/ManifestOutputFileFactory.java:
##########
@@ -41,6 +42,16 @@ class ManifestOutputFileFactory {
   private final long attemptNumber;
   private final AtomicInteger fileCount = new AtomicInteger(0);
 
+  public ManifestOutputFileFactory(
+      Supplier<Table> tableSupplier, Map<String, String> props, String prefix) {

Review Comment:
   is `prefix` used here?



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/IcebergSink.java:
##########
@@ -0,0 +1,767 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink;
+
+import static org.apache.iceberg.TableProperties.AVRO_COMPRESSION;
+import static org.apache.iceberg.TableProperties.AVRO_COMPRESSION_LEVEL;
+import static org.apache.iceberg.TableProperties.ORC_COMPRESSION;
+import static org.apache.iceberg.TableProperties.ORC_COMPRESSION_STRATEGY;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_LEVEL;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
+import java.util.function.Function;
+import org.apache.flink.api.common.functions.MapFunction;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.api.connector.sink2.CommitterInitContext;
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.api.connector.sink2.SinkWriter;
+import org.apache.flink.api.connector.sink2.SupportsCommitter;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.streaming.api.connector.sink2.CommittableMessage;
+import org.apache.flink.streaming.api.connector.sink2.CommittableMessageTypeInfo;
+import org.apache.flink.streaming.api.connector.sink2.SupportsPostCommitTopology;
+import org.apache.flink.streaming.api.connector.sink2.SupportsPreCommitTopology;
+import org.apache.flink.streaming.api.connector.sink2.SupportsPreWriteTopology;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.data.util.DataFormatConverters;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.Preconditions;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DistributionMode;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.PartitionField;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.SerializableTable;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.FlinkSchemaUtil;
+import org.apache.iceberg.flink.FlinkWriteConf;
+import org.apache.iceberg.flink.FlinkWriteOptions;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.committer.IcebergCommittable;
+import org.apache.iceberg.flink.sink.committer.IcebergCommittableSerializer;
+import org.apache.iceberg.flink.sink.committer.IcebergCommitter;
+import org.apache.iceberg.flink.sink.committer.IcebergWriteAggregator;
+import org.apache.iceberg.flink.sink.committer.WriteResultSerializer;
+import org.apache.iceberg.flink.util.FlinkCompatibilityUtil;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.types.TypeUtil;
+import org.apache.iceberg.util.SerializableSupplier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Flink v2 sink offers different hooks to insert custom topologies into the sink. We will use the
+ * following:
+ *
+ * <ul>
+ *   <li>{@link SupportsPreWriteTopology} which redistributes the data to the writers based on the
+ *       {@link DistributionMode}
+ *   <li>{@link org.apache.flink.api.connector.sink2.SinkWriter} which writes data files, and
+ *       generates a {@link IcebergCommittable} to store the {@link
+ *       org.apache.iceberg.io.WriteResult}
+ *   <li>{@link SupportsPreCommitTopology} which we use to place the {@link
+ *       IcebergWriteAggregator} which merges the individual {@link
+ *       org.apache.flink.api.connector.sink2.SinkWriter}'s {@link
+ *       org.apache.iceberg.io.WriteResult}s to a single {@link
+ *       org.apache.iceberg.flink.sink.DeltaManifests}
+ *   <li>{@link IcebergCommitter} which stores the incoming {@link
+ *       org.apache.iceberg.flink.sink.DeltaManifests}s in state for recovery, and commits them to
+ *       the Iceberg table
+ *   <li>{@link SupportsPostCommitTopology} we could use for incremental compaction later. This is
+ *       not implemented yet.
+ * </ul>
+ *
+ * The job graph looks like below:
+ *
+ * <pre>{@code
+ *                            Flink sink
+ *               +-----------------------------------------------------------------------------------+
+ *               |                                                                                    |
+ * +-------+     | +----------+                               +-------------+      +---------------+ |
+ * | Map 1 | ==> | | writer 1 |                               | committer 1 | ---> | post commit 1 | |
+ * +-------+     | +----------+                               +-------------+      +---------------+ |
+ *               |             \                             /                \                      |
+ *               |              \                           /                  \                     |
+ *               |               \                         /                    \                    |
+ * +-------+     | +----------+   \ +-------------------+ /   +-------------+    \ +---------------+ |
+ * | Map 2 | ==> | | writer 2 | --->| commit aggregator |     | committer 2 |      | post commit 2 | |
+ * +-------+     | +----------+     +-------------------+     +-------------+      +---------------+ |
+ *               |                                             Commit only on                        |
+ *               |                                             committer 1                           |
+ *               +-----------------------------------------------------------------------------------+
+ * }</pre>
+ */
+public class IcebergSink
+    implements Sink<RowData>,
+        SupportsPreWriteTopology<RowData>,
+        SupportsCommitter<IcebergCommittable>,
+        SupportsPreCommitTopology<WriteResult, IcebergCommittable>,
+        SupportsPostCommitTopology<IcebergCommittable> {
+  private Table initTable;
+  private final TableLoader tableLoader;
+  private final Map<String, String> snapshotProperties;
+  private String uidPrefix;
+  private Committer<IcebergCommittable> sinkCommitter;
+  private final String sinkId;
+  private transient Builder builder;
+  private Map<String, String> writeProperties;
+  private RowType flinkRowType;
+  private SerializableSupplier<Table> tableSupplier;
+  private transient FlinkWriteConf flinkWriteConf;
+  private List<Integer> equalityFieldIds;
+  private boolean upsertMode;
+  private FileFormat dataFileFormat;
+  private long targetDataFileSize;
+  private String branch;
+  private boolean overwriteMode;
+  private int workerPoolSize;
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergSink.class);
+
+  private IcebergSink(Builder builder) {
+    this.builder = builder;

Review Comment:
   do we need to keep a reference to `builder` here?



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergCommittableSerializer.java:
##########
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.core.memory.DataInputDeserializer;
+import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
+
+/**
+ * This serializer is used for serializing the {@link IcebergCommittable} objects between the Writer
+ * and the Aggregator operator and between the Aggregator and the Committer as well.
+ *
+ * <p>In both cases only the respective part is serialized.
+ */
+public class IcebergCommittableSerializer implements SimpleVersionedSerializer<IcebergCommittable> {
+  private static final int VERSION = 1;
+
+  @Override
+  public int getVersion() {
+    return VERSION;
+  }
+
+  @Override
+  public byte[] serialize(IcebergCommittable committable) throws IOException {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    DataOutputViewStreamWrapper view = new DataOutputViewStreamWrapper(out);
+    view.writeUTF(committable.jobId());
+    view.writeUTF(committable.operatorId());
+    view.writeLong(committable.checkpointId());
+    view.writeInt(committable.manifest().length);
+    view.write(committable.manifest());
+    return out.toByteArray();
+  }
+
+  @Override
+  public IcebergCommittable deserialize(int version, byte[] serialized) throws IOException {
+    if (version == VERSION) {

Review Comment:
   nit: typically use the constant `1` here, as VERSION is just the latest/default version. see example from Flink's `FileSourceSplitSerializer`
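   
   For illustration, a minimal sketch of that pattern, dispatching on the literal wire version in `deserialize` (the `deserializeV1` helper here is hypothetical, not part of the PR):
   
   ```java
   @Override
   public IcebergCommittable deserialize(int version, byte[] serialized) throws IOException {
     // Dispatch on the literal version that was written with the bytes, so that a later bump of
     // the VERSION constant does not silently change how old state is read.
     switch (version) {
       case 1:
         return deserializeV1(serialized);
       default:
         throw new IOException("Unknown serializer version: " + version);
     }
   }
   ```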



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergWriteAggregator.java:
##########
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.function.Supplier;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.flink.streaming.api.connector.sink2.CommittableMessage;
+import org.apache.flink.streaming.api.connector.sink2.CommittableSummary;
+import org.apache.flink.streaming.api.connector.sink2.CommittableWithLineage;
+import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
+import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
+import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
+import org.apache.iceberg.DeleteFile;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ManifestFiles;
+import org.apache.iceberg.ManifestWriter;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.ManifestOutputFileFactory;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Operator which aggregates the individual {@link WriteResult} objects to a single {@link
+ * IcebergCommittable} per checkpoint (storing the serialized {@link DeltaManifests}, jobId,
+ * operatorId, checkpointId)
+ */
+public class IcebergWriteAggregator
+    extends AbstractStreamOperator<CommittableMessage<IcebergCommittable>>
+    implements OneInputStreamOperator<
+        CommittableMessage<WriteResult>, CommittableMessage<IcebergCommittable>> {
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergWriteAggregator.class);
+  private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];
+  private final Collection<WriteResult> results;
+  private transient ManifestOutputFileFactory icebergManifestOutputFileFactory;
+  private transient Table table;
+  private final TableLoader tableLoader;
+  private final String prefix;
+  private static final int FORMAT_V2 = 2;
+  private static final Long DUMMY_SNAPSHOT_ID = 0L;
+
+  public IcebergWriteAggregator(TableLoader tableLoader, String prefix) {
+    this.results = Sets.newHashSet();
+    this.tableLoader = tableLoader;
+    this.prefix = prefix;
+  }
+
+  @Override
+  public void open() throws Exception {
+    if (!tableLoader.isOpen()) {
+      tableLoader.open();
+
+      this.table = tableLoader.loadTable();
+      this.icebergManifestOutputFileFactory =
+          FlinkManifestUtil.createOutputFileFactory(() -> table, table.properties(), prefix);
+    }
+  }
+
+  @Override
+  public void finish() throws IOException {
+    prepareSnapshotPreBarrier(Long.MAX_VALUE);
+  }
+
+  @Override
+  public void prepareSnapshotPreBarrier(long checkpointId) throws IOException {
+    IcebergCommittable committable =
+        new IcebergCommittable(
+            writeToManifest(results, checkpointId),
+            getContainingTask().getEnvironment().getJobID().toString(),
+            getRuntimeContext().getOperatorUniqueID(),
+            checkpointId);
+    CommittableMessage<IcebergCommittable> message =
+        new CommittableWithLineage<>(committable, checkpointId, 0);
+    CommittableMessage<IcebergCommittable> summary =
+        new CommittableSummary<>(0, 1, checkpointId, 1, 1, 0);
+    output.collect(new StreamRecord<>(summary));
+    output.collect(new StreamRecord<>(message));
+    LOG.info("Aggregated commit message emitted {}", message);
+    results.clear();
+  }
+
+  /**
+   * Write all the completed data files to a newly created manifest file and return the manifest's
+   * avro serialized bytes.
+   */
+  public byte[] writeToManifest(Collection<WriteResult> writeResults, long checkpointId)
+      throws IOException {
+    if (writeResults.isEmpty()) {
+      return EMPTY_MANIFEST_DATA;
+    }
+
+    WriteResult result = WriteResult.builder().addAll(writeResults).build();
+    DeltaManifests deltaManifests =
+        writeCompletedFiles(
+            result, () -> icebergManifestOutputFileFactory.create(checkpointId), table.spec());
+
+    return SimpleVersionedSerialization.writeVersionAndSerialize(
+        DeltaManifestsSerializer.INSTANCE, deltaManifests);
+  }
+
+  /**
+   * Write the {@link WriteResult} to temporary manifest files.
+   *
+   * @param result all those DataFiles/DeleteFiles in this WriteResult should be written with same
+   *     partition spec
+   */
+  public static DeltaManifests writeCompletedFiles(

Review Comment:
   this static util method already exists in `FlinkManifestUtil`
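   
   A sketch of how the method could delegate instead, assuming the existing `FlinkManifestUtil.writeCompletedFiles(WriteResult, Supplier<OutputFile>, PartitionSpec)` helper is (or can be made) visible from this package:
   
   ```java
   public byte[] writeToManifest(Collection<WriteResult> writeResults, long checkpointId)
       throws IOException {
     if (writeResults.isEmpty()) {
       return EMPTY_MANIFEST_DATA;
     }
   
     // Merge the per-writer results and reuse the shared helper rather than re-implementing it here.
     WriteResult result = WriteResult.builder().addAll(writeResults).build();
     DeltaManifests deltaManifests =
         FlinkManifestUtil.writeCompletedFiles(
             result, () -> icebergManifestOutputFileFactory.create(checkpointId), table.spec());
   
     return SimpleVersionedSerialization.writeVersionAndSerialize(
         DeltaManifestsSerializer.INSTANCE, deltaManifests);
   }
   ```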



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergCommitter.java:
##########
@@ -0,0 +1,434 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ReplacePartitions;
+import org.apache.iceberg.RowDelta;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotUpdate;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.CommitSummary;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.IcebergFilesCommitterMetrics;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.ThreadPools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the Flink SinkV2 {@link Committer} interface to implement the Iceberg
+ * commits. The implementation builds on the following assumptions:
+ *
+ * <ul>
+ *   <li>There is a single {@link IcebergCommittable} for every checkpoint
+ *   <li>There is no late checkpoint - if checkpoint 'x' has received in one call, then after a
+ *       successful run only checkpoints &gt; x will arrive
+ *   <li>There is no other writer which would generate another commit to the same branch with the
+ *       same jobId-operatorId-checkpointId triplet
+ * </ul>
+ */
+public class IcebergCommitter implements Committer<IcebergCommittable>, Serializable {
+  private static final long serialVersionUID = 1L;
+  private static final String MAX_COMMITTED_CHECKPOINT_ID = "flink.max-committed-checkpoint-id";
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergCommitter.class);
+  private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];
+  private static final CommitRequest<IcebergCommittable> EMPTY_COMMIT_REQUEST =
+      new CommitRequest<IcebergCommittable>() {
+        @Override
+        public IcebergCommittable getCommittable() {
+          return new IcebergCommittable(EMPTY_MANIFEST_DATA, "jobId", "operatorId", -1L);
+        }
+
+        @Override
+        public int getNumberOfRetries() {
+          return 0;
+        }
+
+        @Override
+        public void signalFailedWithKnownReason(Throwable t) {}
+
+        @Override
+        public void signalFailedWithUnknownReason(Throwable t) {}
+
+        @Override
+        public void retryLater() {}
+
+        @Override
+        public void updateAndRetryLater(IcebergCommittable committable) {}
+
+        @Override
+        public void signalAlreadyCommitted() {}
+      };
+
+  @VisibleForTesting
+  static final String MAX_CONTINUOUS_EMPTY_COMMITS = "flink.max-continuous-empty-commits";
+
+  public static final long INITIAL_CHECKPOINT_ID = -1L;
+  public static final String FLINK_JOB_ID = "flink.job-id";
+  public static final String OPERATOR_ID = "flink.operator-id";
+  private final String branch;
+  private final Map<String, String> snapshotProperties;
+  private final boolean replacePartitions;
+  private final int workerPoolSize;
+  private final String prefix;
+  private transient IcebergFilesCommitterMetrics committerMetrics;
+  private transient Table table;
+  private transient int maxContinuousEmptyCommits;
+  private transient ExecutorService workerPool;
+  private transient int continuousEmptyCheckpoints = 0;
+
+  public IcebergCommitter(
+      TableLoader tableLoader,
+      String branch,
+      Map<String, String> snapshotProperties,
+      boolean replacePartitions,
+      int workerPoolSize,
+      String prefix,
+      IcebergFilesCommitterMetrics committerMetrics) {
+    this.branch = branch;
+    this.snapshotProperties = snapshotProperties;
+    this.replacePartitions = replacePartitions;
+    this.workerPoolSize = workerPoolSize;
+    this.prefix = prefix;

Review Comment:
   this.prefix doesn't seem to be used later on. It seems `prefix` is only used in the worker pool name; technically, it is not used as a `prefix`.
   
   The old code used the operator ID in the worker pool name. I guess it is not obtainable in the v2 sink?
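   
   For reference, a rough sketch of the naming convention the v1 committer uses (the exact format in `IcebergFilesCommitter#open` may differ slightly):
   
   ```java
   // v1 committer: the pool name carries the Flink job id and the operator's unique id.
   String operatorId = getRuntimeContext().getOperatorUniqueID();
   this.workerPool =
       ThreadPools.newWorkerPool("iceberg-worker-pool-" + flinkJobId + "-" + operatorId, workerPoolSize);
   ```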



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/IcebergSink.java:
##########
@@ -0,0 +1,767 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink;
+
+import static org.apache.iceberg.TableProperties.AVRO_COMPRESSION;
+import static org.apache.iceberg.TableProperties.AVRO_COMPRESSION_LEVEL;
+import static org.apache.iceberg.TableProperties.ORC_COMPRESSION;
+import static org.apache.iceberg.TableProperties.ORC_COMPRESSION_STRATEGY;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_LEVEL;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
+import java.util.function.Function;
+import org.apache.flink.api.common.functions.MapFunction;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.api.connector.sink2.CommitterInitContext;
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.api.connector.sink2.SinkWriter;
+import org.apache.flink.api.connector.sink2.SupportsCommitter;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.streaming.api.connector.sink2.CommittableMessage;
+import org.apache.flink.streaming.api.connector.sink2.CommittableMessageTypeInfo;
+import org.apache.flink.streaming.api.connector.sink2.SupportsPostCommitTopology;
+import org.apache.flink.streaming.api.connector.sink2.SupportsPreCommitTopology;
+import org.apache.flink.streaming.api.connector.sink2.SupportsPreWriteTopology;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.data.util.DataFormatConverters;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.Preconditions;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DistributionMode;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.PartitionField;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.SerializableTable;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.FlinkSchemaUtil;
+import org.apache.iceberg.flink.FlinkWriteConf;
+import org.apache.iceberg.flink.FlinkWriteOptions;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.committer.IcebergCommittable;
+import org.apache.iceberg.flink.sink.committer.IcebergCommittableSerializer;
+import org.apache.iceberg.flink.sink.committer.IcebergCommitter;
+import org.apache.iceberg.flink.sink.committer.IcebergWriteAggregator;
+import org.apache.iceberg.flink.sink.committer.WriteResultSerializer;
+import org.apache.iceberg.flink.util.FlinkCompatibilityUtil;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.types.TypeUtil;
+import org.apache.iceberg.util.SerializableSupplier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Flink v2 sink offers different hooks to insert custom topologies into the sink. We will use the
+ * following:
+ *
+ * <ul>
+ *   <li>{@link SupportsPreWriteTopology} which redistributes the data to the writers based on the
+ *       {@link DistributionMode}
+ *   <li>{@link org.apache.flink.api.connector.sink2.SinkWriter} which writes data files, and
+ *       generates a {@link IcebergCommittable} to store the {@link
+ *       org.apache.iceberg.io.WriteResult}
+ *   <li>{@link SupportsPreCommitTopology} which we use to place the {@link
+ *       IcebergWriteAggregator} which merges the individual {@link
+ *       org.apache.flink.api.connector.sink2.SinkWriter}'s {@link
+ *       org.apache.iceberg.io.WriteResult}s to a single {@link
+ *       org.apache.iceberg.flink.sink.DeltaManifests}
+ *   <li>{@link IcebergCommitter} which stores the incoming {@link
+ *       org.apache.iceberg.flink.sink.DeltaManifests}s in state for recovery, and commits them to
+ *       the Iceberg table
+ *   <li>{@link SupportsPostCommitTopology} we could use for incremental compaction later. This is
+ *       not implemented yet.
+ * </ul>
+ *
+ * The job graph looks like below:
+ *
+ * <pre>{@code
+ *                            Flink sink
+ *               +-----------------------------------------------------------------------------------+
+ *               |                                                                                    |
+ * +-------+     | +----------+                               +-------------+      +---------------+ |
+ * | Map 1 | ==> | | writer 1 |                               | committer 1 | ---> | post commit 1 | |
+ * +-------+     | +----------+                               +-------------+      +---------------+ |
+ *               |             \                             /                \                      |
+ *               |              \                           /                  \                     |
+ *               |               \                         /                    \                    |
+ * +-------+     | +----------+   \ +-------------------+ /   +-------------+    \ +---------------+ |
+ * | Map 2 | ==> | | writer 2 | --->| commit aggregator |     | committer 2 |      | post commit 2 | |
+ * +-------+     | +----------+     +-------------------+     +-------------+      +---------------+ |
+ *               |                                             Commit only on                        |
+ *               |                                             committer 1                           |
+ *               +-----------------------------------------------------------------------------------+
+ * }</pre>
+ */
+public class IcebergSink

Review Comment:
   let's mark it as `@Experimental` to start with
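   
   For illustration, a sketch of the suggestion, assuming Flink's `org.apache.flink.annotation.Experimental` annotation is the one intended:
   
   ```java
   import org.apache.flink.annotation.Experimental;
   
   // Marking the new sink as experimental signals that its API may still change.
   @Experimental
   public class IcebergSink
       implements Sink<RowData>,
           SupportsPreWriteTopology<RowData>,
           SupportsCommitter<IcebergCommittable>,
           SupportsPreCommitTopology<WriteResult, IcebergCommittable>,
           SupportsPostCommitTopology<IcebergCommittable> {
     // ...
   }
   ```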



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergCommitter.java:
##########
@@ -0,0 +1,434 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ReplacePartitions;
+import org.apache.iceberg.RowDelta;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotUpdate;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.CommitSummary;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.IcebergFilesCommitterMetrics;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.ThreadPools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the Flink SinkV2 {@link Committer} interface to implement the Iceberg
+ * commits. The implementation builds on the following assumptions:
+ *
+ * <ul>
+ *   <li>There is a single {@link IcebergCommittable} for every checkpoint
+ *   <li>There is no late checkpoint - if checkpoint 'x' has received in one call, then after a
+ *       successful run only checkpoints &gt; x will arrive
+ *   <li>There is no other writer which would generate another commit to the same branch with the
+ *       same jobId-operatorId-checkpointId triplet
+ * </ul>
+ */
+public class IcebergCommitter implements Committer<IcebergCommittable>, Serializable {
+  private static final long serialVersionUID = 1L;
+  private static final String MAX_COMMITTED_CHECKPOINT_ID = "flink.max-committed-checkpoint-id";
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergCommitter.class);
+  private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];
+  private static final CommitRequest<IcebergCommittable> EMPTY_COMMIT_REQUEST =
+      new CommitRequest<IcebergCommittable>() {
+        @Override
+        public IcebergCommittable getCommittable() {
+          return new IcebergCommittable(EMPTY_MANIFEST_DATA, "jobId", "operatorId", -1L);
+        }
+
+        @Override
+        public int getNumberOfRetries() {
+          return 0;
+        }
+
+        @Override
+        public void signalFailedWithKnownReason(Throwable t) {}
+
+        @Override
+        public void signalFailedWithUnknownReason(Throwable t) {}
+
+        @Override
+        public void retryLater() {}
+
+        @Override
+        public void updateAndRetryLater(IcebergCommittable committable) {}
+
+        @Override
+        public void signalAlreadyCommitted() {}
+      };
+
+  @VisibleForTesting
+  static final String MAX_CONTINUOUS_EMPTY_COMMITS = "flink.max-continuous-empty-commits";
+
+  public static final long INITIAL_CHECKPOINT_ID = -1L;
+  public static final String FLINK_JOB_ID = "flink.job-id";
+  public static final String OPERATOR_ID = "flink.operator-id";
+  private final String branch;
+  private final Map<String, String> snapshotProperties;
+  private final boolean replacePartitions;
+  private final int workerPoolSize;
+  private final String prefix;
+  private transient IcebergFilesCommitterMetrics committerMetrics;
+  private transient Table table;
+  private transient int maxContinuousEmptyCommits;
+  private transient ExecutorService workerPool;
+  private transient int continuousEmptyCheckpoints = 0;
+
+  public IcebergCommitter(
+      TableLoader tableLoader,
+      String branch,
+      Map<String, String> snapshotProperties,
+      boolean replacePartitions,
+      int workerPoolSize,
+      String prefix,
+      IcebergFilesCommitterMetrics committerMetrics) {
+    this.branch = branch;
+    this.snapshotProperties = snapshotProperties;
+    this.replacePartitions = replacePartitions;
+    this.workerPoolSize = workerPoolSize;
+    this.prefix = prefix;
+    this.committerMetrics = committerMetrics;
+
+    if (!tableLoader.isOpen()) {
+      tableLoader.open();
+    }
+
+    this.table = tableLoader.loadTable();
+    this.maxContinuousEmptyCommits =
+        PropertyUtil.propertyAsInt(table.properties(), MAX_CONTINUOUS_EMPTY_COMMITS, 10);
+    Preconditions.checkArgument(
+        maxContinuousEmptyCommits > 0, MAX_CONTINUOUS_EMPTY_COMMITS + " must be positive");
+    this.workerPool =
+        ThreadPools.newWorkerPool("iceberg-worker-pool-" + table + "-" + prefix, workerPoolSize);
+    this.continuousEmptyCheckpoints = 0;
+  }
+
+  @Override
+  public void commit(Collection<CommitRequest<IcebergCommittable>> commitRequests)
+      throws IOException, InterruptedException {
+    NavigableMap<Long, byte[]> manifestMap = Maps.newTreeMap();
+    NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap = Maps.newTreeMap();
+
+    if (commitRequests.isEmpty()) {
+      commitRequests.add(EMPTY_COMMIT_REQUEST);
+    }
+
+    for (CommitRequest<IcebergCommittable> request : commitRequests) {
+      manifestMap.put(request.getCommittable().checkpointId(), request.getCommittable().manifest());
+      commitRequestMap.put(request.getCommittable().checkpointId(), request);
+    }
+
+    long maxCommittedCheckpointId = getMaxCommittedCheckpointId(commitRequestMap);
+
+    // Mark the already committed FilesCommittable(s) as finished
+    commitRequestMap
+        .headMap(maxCommittedCheckpointId, true)
+        .values()
+        .forEach(CommitRequest::signalAlreadyCommitted);
+
+    // Commit the remaining
+    IcebergCommittable last = commitRequestMap.lastEntry().getValue().getCommittable();
+
+    NavigableMap<Long, byte[]> uncommitted =
+        Maps.newTreeMap(manifestMap).tailMap(maxCommittedCheckpointId, false);
+    if (!uncommitted.isEmpty()) {
+      commitUpToCheckpoint(
+          commitRequestMap, uncommitted, last.jobId(), last.operatorId(), last.checkpointId());
+    }
+  }
+
+  /**
+   * Gets the last checkpointId which is committed to this branch of the table. The commits are
+   * identified by the {@link IcebergCommittable} (jobId/operatorId/checkpointId). Only used by
+   * {@link IcebergCommittable} (SinkV2).
+   *
+   * @param requests The {@link IcebergCommittable}s ordered by checkpointId
+   * @return The checkpointId for the first commit (historically backward)
+   */
+  public long getMaxCommittedCheckpointId(
+      NavigableMap<Long, CommitRequest<IcebergCommittable>> requests) {
+    Snapshot snapshot = table.snapshot(branch);
+    long lastCommittedCheckpointId = INITIAL_CHECKPOINT_ID;
+
+    while (snapshot != null) {
+      Map<String, String> summary = snapshot.summary();
+      String snapshotFlinkJobId = summary.get(FLINK_JOB_ID);
+      String snapshotOperatorId = summary.get(OPERATOR_ID);
+      String snapshotCheckpointId = summary.get(MAX_COMMITTED_CHECKPOINT_ID);
+      if (snapshotCheckpointId != null) {
+        long checkpointId = Long.parseLong(snapshotCheckpointId);
+        CommitRequest<IcebergCommittable> request = requests.get(checkpointId);
+        if (request != null
+            && request.getCommittable().jobId().equals(snapshotFlinkJobId)
+            && request.getCommittable().operatorId().equals(snapshotOperatorId)) {
+          lastCommittedCheckpointId = checkpointId;
+          break;
+        }
+      }
+
+      Long parentSnapshotId = snapshot.parentId();
+      snapshot = parentSnapshotId != null ? table.snapshot(parentSnapshotId) : null;
+    }
+
+    return lastCommittedCheckpointId;
+  }
+
+  /**
+   * Commits the data to the Iceberg table by reading the file data from the {@link DeltaManifests}
+   * ordered by the checkpointId, and writing the new snapshot to the Iceberg table. The {@link
+   * org.apache.iceberg.SnapshotSummary} will contain the jobId, snapshotId, checkpointId so in case
+   * of job restart we can identify which changes are committed, and which are still waiting for the
+   * commit.
+   *
+   * @param deltaManifestsMap The checkpointId to {@link DeltaManifests} map of the changes to
+   *     commit
+   * @param newFlinkJobId The jobId to store in the {@link org.apache.iceberg.SnapshotSummary}
+   * @param operatorId The operatorId to store in the {@link org.apache.iceberg.SnapshotSummary}
+   * @param checkpointId The checkpointId to store in the {@link org.apache.iceberg.SnapshotSummary}
+   * @throws IOException On commit failure
+   */
+  public void commitUpToCheckpoint(
+      NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap,
+      NavigableMap<Long, byte[]> deltaManifestsMap,
+      String newFlinkJobId,
+      String operatorId,
+      long checkpointId)
+      throws IOException {
+    NavigableMap<Long, byte[]> pendingMap = deltaManifestsMap.headMap(checkpointId, true);
+    List<ManifestFile> manifests = Lists.newArrayList();
+    NavigableMap<Long, WriteResult> pendingResults = Maps.newTreeMap();
+    for (Map.Entry<Long, byte[]> e : pendingMap.entrySet()) {
+      if (Arrays.equals(EMPTY_MANIFEST_DATA, e.getValue())) {
+        // Skip the empty flink manifest.
+        continue;
+      }
+
+      DeltaManifests deltaManifests =
+          SimpleVersionedSerialization.readVersionAndDeSerialize(
+              DeltaManifestsSerializer.INSTANCE, e.getValue());
+      pendingResults.put(
+          e.getKey(),
+          FlinkManifestUtil.readCompletedFiles(deltaManifests, table.io(), table.specs()));
+      manifests.addAll(deltaManifests.manifests());
+    }
+
+    CommitSummary summary = new CommitSummary(pendingResults);
+    commitPendingResult(
+        commitRequestMap, pendingResults, summary, newFlinkJobId, operatorId, checkpointId);
+    if (committerMetrics != null) {
+      committerMetrics.updateCommitSummary(summary);
+    }
+    pendingMap.clear();
+    deleteCommittedManifests(manifests, newFlinkJobId, checkpointId);
+  }
+
+  private void commitPendingResult(
+      NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap,
+      NavigableMap<Long, WriteResult> pendingResults,
+      CommitSummary summary,
+      String newFlinkJobId,
+      String operatorId,
+      long checkpointId) {
+    long totalFiles = summary.dataFilesCount() + summary.deleteFilesCount();
+    continuousEmptyCheckpoints = totalFiles == 0 ? continuousEmptyCheckpoints + 1 : 0;
+    if (totalFiles != 0 || continuousEmptyCheckpoints % maxContinuousEmptyCommits == 0) {
+      if (replacePartitions) {
+        replacePartitions(
+            commitRequestMap, pendingResults, summary, newFlinkJobId, operatorId, checkpointId);
+      } else {
+        commitDeltaTxn(
+            commitRequestMap, pendingResults, summary, newFlinkJobId, operatorId, checkpointId);
+      }
+      continuousEmptyCheckpoints = 0;
+    } else {
+      LOG.info("Skip commit for checkpoint {} due to no data files or delete 
files.", checkpointId);
+    }
+  }
+
+  private void replacePartitions(
+      NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap,
+      NavigableMap<Long, WriteResult> pendingResults,
+      CommitSummary summary,
+      String newFlinkJobId,
+      String operatorId,
+      long checkpointId) {
+    Preconditions.checkState(
+        summary.deleteFilesCount() == 0, "Cannot overwrite partitions with delete files.");
+    // Commit the overwrite transaction.
+    ReplacePartitions dynamicOverwrite = table.newReplacePartitions().scanManifestsWith(workerPool);
+    for (WriteResult result : pendingResults.values()) {
+      Preconditions.checkState(
+          result.referencedDataFiles().length == 0, "Should have no referenced data files.");
+      Arrays.stream(result.dataFiles()).forEach(dynamicOverwrite::addFile);
+    }
+
+    commitOperation(
+        commitRequestMap,
+        dynamicOverwrite,
+        summary,
+        "dynamic partition overwrite",
+        newFlinkJobId,
+        operatorId,
+        checkpointId);
+  }
+
+  private void commitDeltaTxn(
+      NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap,
+      NavigableMap<Long, WriteResult> pendingResults,
+      CommitSummary summary,
+      String newFlinkJobId,
+      String operatorId,
+      long checkpointId) {
+    if (summary.deleteFilesCount() == 0) {
+      // To be compatible with iceberg format V1.
+      AppendFiles appendFiles = table.newAppend().scanManifestsWith(workerPool);
+      for (WriteResult result : pendingResults.values()) {
+        Preconditions.checkState(
+            result.referencedDataFiles().length == 0,
+            "Should have no referenced data files for append.");
+        Arrays.stream(result.dataFiles()).forEach(appendFiles::appendFile);
+      }
+      // fail all commits as really its only one
+      commitOperation(
+          commitRequestMap,
+          appendFiles,
+          summary,
+          "append",
+          newFlinkJobId,
+          operatorId,
+          checkpointId);
+    } else {
+      // To be compatible with iceberg format V2.
+      for (Map.Entry<Long, WriteResult> e : pendingResults.entrySet()) {
+        // We don't commit the merged result into a single transaction because for the sequential
+        // transaction txn1 and txn2, the equality-delete files of txn2 are required to be applied
+        // to data files from txn1. Committing the merged one will lead to the incorrect delete
+        // semantic.
+        WriteResult result = e.getValue();
+
+        // Row delta validations are not needed for streaming changes that write equality deletes.
+        // Equality deletes are applied to data in all previous sequence numbers, so retries may
+        // push deletes further in the future, but do not affect correctness. Position deletes
+        // committed to the table in this path are used only to delete rows from data files that are
+        // being added in this commit. There is no way for data files added along with the delete
+        // files to be concurrently removed, so there is no need to validate the files referenced by
+        // the position delete files that are being committed.
+        RowDelta rowDelta = table.newRowDelta().scanManifestsWith(workerPool);
+
+        Arrays.stream(result.dataFiles()).forEach(rowDelta::addRows);
+        Arrays.stream(result.deleteFiles()).forEach(rowDelta::addDeletes);
+        commitOperation(
+            commitRequestMap, rowDelta, summary, "rowDelta", newFlinkJobId, operatorId, e.getKey());
+      }
+    }
+  }
+
+  private void commitOperation(
+      NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap,
+      SnapshotUpdate<?> operation,
+      CommitSummary summary,
+      String description,
+      String newFlinkJobId,
+      String operatorId,
+      long checkpointId) {
+    try {
+      LOG.info(
+          "Committing {} for checkpoint {} to table {} branch {} with summary: 
{}",
+          description,
+          checkpointId,
+          table.name(),
+          branch,
+          summary);
+      snapshotProperties.forEach(operation::set);
+      // custom snapshot metadata properties will be overridden if they conflict with internal ones
+      // used by the sink.
+      operation.set(MAX_COMMITTED_CHECKPOINT_ID, Long.toString(checkpointId));
+      operation.set(FLINK_JOB_ID, newFlinkJobId);
+      operation.set(OPERATOR_ID, operatorId);
+      operation.toBranch(branch);
+
+      long startNano = System.nanoTime();
+      operation.commit(); // abort is automatically called if this fails.
+      long durationMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNano);
+      LOG.info(
+          "Committed {} to table: {}, branch: {}, checkpointId {} in {} ms",
+          description,
+          table.name(),
+          branch,
+          checkpointId,
+          durationMs);
+      if (committerMetrics != null) {
+        committerMetrics.commitDuration(durationMs);
+      }
+    } catch (Exception e) {
+      commitRequestMap.tailMap(checkpointId, true).values().forEach(CommitRequest::retryLater);
+    }
+  }
+
+  private void deleteCommittedManifests(

Review Comment:
   nit: this can be made a static util method for reuse if table io is passed in
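   
   A possible shape for that util (sketch only; the method and parameter names are illustrative, assuming `org.apache.iceberg.io.FileIO` is the IO handle passed in):
   
   ```java
   static void deleteCommittedManifests(
       FileIO io, List<ManifestFile> manifests, String newFlinkJobId, long checkpointId) {
     for (ManifestFile manifest : manifests) {
       try {
         io.deleteFile(manifest.path());
       } catch (Exception e) {
         // The flink manifests cleanup failure should not affect the completed commit.
         LOG.warn(
             "The iceberg transaction has been committed, but we failed to clean the temporary flink manifests: {} (jobId={}, checkpointId={})",
             manifest.path(),
             newFlinkJobId,
             checkpointId,
             e);
       }
     }
   }
   ```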



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/ManifestOutputFileFactory.java:
##########
@@ -41,6 +42,16 @@ class ManifestOutputFileFactory {
   private final long attemptNumber;
   private final AtomicInteger fileCount = new AtomicInteger(0);
 
+  public ManifestOutputFileFactory(
+      Supplier<Table> tableSupplier, Map<String, String> props, String prefix) {
+    this.tableSupplier = tableSupplier;
+    this.props = props;
+    this.flinkJobId = null;

Review Comment:
   this can be problematic. see the `generatePath` method.
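   
   For context, the manifest file name built by `generatePath` includes `flinkJobId`, so leaving it null would leak the literal string "null" into the generated path. Roughly (illustrative only; the real format string and fields may differ):
   
   ```java
   // Illustrative sketch: a null flinkJobId would be rendered as "null" in the manifest file name.
   private String generatePath(long checkpointId) {
     return FileFormat.AVRO.addExtension(
         String.format(
             "%s-%05d-%d-%d-%05d",
             flinkJobId, subTaskId, attemptNumber, checkpointId, fileCount.incrementAndGet()));
   }
   ```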



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergCommitter.java:
##########
@@ -0,0 +1,434 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ReplacePartitions;
+import org.apache.iceberg.RowDelta;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotUpdate;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.CommitSummary;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.IcebergFilesCommitterMetrics;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.ThreadPools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the Flink SinkV2 {@link Committer} interface to implement the Iceberg
+ * commits. The implementation builds on the following assumptions:
+ *
+ * <ul>
+ *   <li>There is a single {@link IcebergCommittable} for every checkpoint
+ *   <li>There is no late checkpoint - if checkpoint 'x' has received in one call, then after a
+ *       successful run only checkpoints &gt; x will arrive
+ *   <li>There is no other writer which would generate another commit to the same branch with the
+ *       same jobId-operatorId-checkpointId triplet
+ * </ul>
+ */
+public class IcebergCommitter implements Committer<IcebergCommittable>, Serializable {

Review Comment:
   nit: mark as `@Internal`? same for other classes in this sub package?
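   
   For illustration, assuming Flink's `org.apache.flink.annotation.Internal` annotation is the one intended:
   
   ```java
   import org.apache.flink.annotation.Internal;
   
   // Marks the committer as an internal implementation detail rather than public API.
   @Internal
   public class IcebergCommitter implements Committer<IcebergCommittable>, Serializable {
     // ...
   }
   ```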



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergCommitter.java:
##########
@@ -0,0 +1,434 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ReplacePartitions;
+import org.apache.iceberg.RowDelta;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotUpdate;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.CommitSummary;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.IcebergFilesCommitterMetrics;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.ThreadPools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the Flink SinkV2 {@link Committer} interface to implement the Iceberg
+ * commits. The implementation builds on the following assumptions:
+ *
+ * <ul>
+ *   <li>There is a single {@link IcebergCommittable} for every checkpoint
+ *   <li>There is no late checkpoint - if checkpoint 'x' has received in one call, then after a
+ *       successful run only checkpoints &gt; x will arrive
+ *   <li>There is no other writer which would generate another commit to the same branch with the
+ *       same jobId-operatorId-checkpointId triplet
+ * </ul>
+ */
+public class IcebergCommitter implements Committer<IcebergCommittable>, Serializable {
+  private static final long serialVersionUID = 1L;
+  private static final String MAX_COMMITTED_CHECKPOINT_ID = "flink.max-committed-checkpoint-id";
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergCommitter.class);
+  private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];
+  private static final CommitRequest<IcebergCommittable> EMPTY_COMMIT_REQUEST =
+      new CommitRequest<IcebergCommittable>() {
+        @Override
+        public IcebergCommittable getCommittable() {
+          return new IcebergCommittable(EMPTY_MANIFEST_DATA, "jobId", "operatorId", -1L);
+        }
+
+        @Override
+        public int getNumberOfRetries() {
+          return 0;
+        }
+
+        @Override
+        public void signalFailedWithKnownReason(Throwable t) {}
+
+        @Override
+        public void signalFailedWithUnknownReason(Throwable t) {}
+
+        @Override
+        public void retryLater() {}
+
+        @Override
+        public void updateAndRetryLater(IcebergCommittable committable) {}
+
+        @Override
+        public void signalAlreadyCommitted() {}
+      };
+
+  @VisibleForTesting
+  static final String MAX_CONTINUOUS_EMPTY_COMMITS = "flink.max-continuous-empty-commits";
+
+  public static final long INITIAL_CHECKPOINT_ID = -1L;
+  public static final String FLINK_JOB_ID = "flink.job-id";
+  public static final String OPERATOR_ID = "flink.operator-id";
+  private final String branch;
+  private final Map<String, String> snapshotProperties;
+  private final boolean replacePartitions;
+  private final int workerPoolSize;
+  private final String prefix;
+  private transient IcebergFilesCommitterMetrics committerMetrics;
+  private transient Table table;
+  private transient int maxContinuousEmptyCommits;
+  private transient ExecutorService workerPool;
+  private transient int continuousEmptyCheckpoints = 0;
+
+  public IcebergCommitter(
+      TableLoader tableLoader,
+      String branch,
+      Map<String, String> snapshotProperties,
+      boolean replacePartitions,
+      int workerPoolSize,
+      String prefix,
+      IcebergFilesCommitterMetrics committerMetrics) {
+    this.branch = branch;
+    this.snapshotProperties = snapshotProperties;
+    this.replacePartitions = replacePartitions;
+    this.workerPoolSize = workerPoolSize;
+    this.prefix = prefix;
+    this.committerMetrics = committerMetrics;
+
+    if (!tableLoader.isOpen()) {
+      tableLoader.open();
+    }
+
+    this.table = tableLoader.loadTable();
+    this.maxContinuousEmptyCommits =
+        PropertyUtil.propertyAsInt(table.properties(), MAX_CONTINUOUS_EMPTY_COMMITS, 10);
+    Preconditions.checkArgument(
+        maxContinuousEmptyCommits > 0, MAX_CONTINUOUS_EMPTY_COMMITS + " must be positive");
+    this.workerPool =
+        ThreadPools.newWorkerPool("iceberg-worker-pool-" + table + "-" + prefix, workerPoolSize);
+    this.continuousEmptyCheckpoints = 0;
+  }
+
+  @Override
+  public void commit(Collection<CommitRequest<IcebergCommittable>> commitRequests)
+      throws IOException, InterruptedException {
+    NavigableMap<Long, byte[]> manifestMap = Maps.newTreeMap();
+    NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap = Maps.newTreeMap();
+
+    if (commitRequests.isEmpty()) {
+      commitRequests.add(EMPTY_COMMIT_REQUEST);
+    }
+
+    for (CommitRequest<IcebergCommittable> request : commitRequests) {
+      manifestMap.put(request.getCommittable().checkpointId(), request.getCommittable().manifest());
+      commitRequestMap.put(request.getCommittable().checkpointId(), request);
+    }
+
+    long maxCommittedCheckpointId = getMaxCommittedCheckpointId(commitRequestMap);
+
+    // Mark the already committed FilesCommittable(s) as finished
+    commitRequestMap
+        .headMap(maxCommittedCheckpointId, true)
+        .values()
+        .forEach(CommitRequest::signalAlreadyCommitted);
+
+    // Commit the remaining
+    IcebergCommittable last = commitRequestMap.lastEntry().getValue().getCommittable();
+
+    NavigableMap<Long, byte[]> uncommitted =
+        Maps.newTreeMap(manifestMap).tailMap(maxCommittedCheckpointId, false);
+    if (!uncommitted.isEmpty()) {
+      commitUpToCheckpoint(
+          commitRequestMap, uncommitted, last.jobId(), last.operatorId(), last.checkpointId());
+    }
+  }
+
+  /**
+   * Gets the last checkpointId which is committed to this branch of the table. The commits are
+   * identified by the {@link IcebergCommittable} (jobId/operatorId/checkpointId). Only used by
+   * {@link IcebergCommittable} (SinkV2).
+   *
+   * @param requests The {@link IcebergCommittable}s ordered by checkpointId
+   * @return The checkpointId for the first commit (historically backward)
+   */
+  public long getMaxCommittedCheckpointId(

Review Comment:
   this was a static method from the old code. can we reuse it here?
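   
   If the old static helper is still around, reuse could look roughly like the sketch below. It mirrors the snapshot-summary scan from the pre-existing committer and relies on the FLINK_JOB_ID / OPERATOR_ID / MAX_COMMITTED_CHECKPOINT_ID constants already defined in this file; where exactly it should live and its visibility are assumptions here:

   ```java
   // Hedged sketch of the shared helper: walk the branch snapshots backwards and
   // return the max committed checkpoint id recorded for this jobId/operatorId.
   static long getMaxCommittedCheckpointId(
       Table table, String flinkJobId, String operatorId, String branch) {
     Snapshot snapshot = table.snapshot(branch);
     long lastCommittedCheckpointId = INITIAL_CHECKPOINT_ID;

     while (snapshot != null) {
       Map<String, String> summary = snapshot.summary();
       String snapshotFlinkJobId = summary.get(FLINK_JOB_ID);
       String snapshotOperatorId = summary.get(OPERATOR_ID);
       if (flinkJobId.equals(snapshotFlinkJobId)
           && (snapshotOperatorId == null || snapshotOperatorId.equals(operatorId))) {
         String value = summary.get(MAX_COMMITTED_CHECKPOINT_ID);
         if (value != null) {
           lastCommittedCheckpointId = Long.parseLong(value);
           break;
         }
       }

       Long parentSnapshotId = snapshot.parentId();
       snapshot = parentSnapshotId != null ? table.snapshot(parentSnapshotId) : null;
     }

     return lastCommittedCheckpointId;
   }
   ```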



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/WriteResultSerializer.java:
##########
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.core.memory.DataInputDeserializer;
+import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
+import org.apache.flink.util.InstantiationUtil;
+import org.apache.iceberg.io.WriteResult;
+
+public class WriteResultSerializer implements SimpleVersionedSerializer<WriteResult> {
+  private static final int VERSION_0 = 0;

Review Comment:
   For consistency, `VERSION` marks the current/latest version as returned by `getVersion()`, and `deserialize` uses the version constants directly for all supported versions.
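   
   A minimal sketch of that convention, reusing helpers this file already imports (the serialization details are illustrative assumptions, not the PR's actual implementation):

   ```java
   public class WriteResultSerializer implements SimpleVersionedSerializer<WriteResult> {
     // current/latest format; this is what getVersion() reports for newly serialized data
     private static final int VERSION = 0;

     @Override
     public int getVersion() {
       return VERSION;
     }

     @Override
     public byte[] serialize(WriteResult writeResult) throws IOException {
       // always write the latest format (assumes WriteResult is Java-serializable)
       return InstantiationUtil.serializeObject(writeResult);
     }

     @Override
     public WriteResult deserialize(int version, byte[] serialized) throws IOException {
       switch (version) {
         case 0:
           try {
             return InstantiationUtil.deserializeObject(serialized, getClass().getClassLoader());
           } catch (ClassNotFoundException e) {
             throw new IOException("Failed to deserialize WriteResult", e);
           }
         default:
           throw new IOException("Unrecognized version or corrupt state: " + version);
       }
     }
   }
   ```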



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/IcebergSink.java:
##########
@@ -0,0 +1,767 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink;
+
+import static org.apache.iceberg.TableProperties.AVRO_COMPRESSION;
+import static org.apache.iceberg.TableProperties.AVRO_COMPRESSION_LEVEL;
+import static org.apache.iceberg.TableProperties.ORC_COMPRESSION;
+import static org.apache.iceberg.TableProperties.ORC_COMPRESSION_STRATEGY;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_LEVEL;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
+import java.util.function.Function;
+import org.apache.flink.api.common.functions.MapFunction;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.api.connector.sink2.CommitterInitContext;
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.api.connector.sink2.SinkWriter;
+import org.apache.flink.api.connector.sink2.SupportsCommitter;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.streaming.api.connector.sink2.CommittableMessage;
+import 
org.apache.flink.streaming.api.connector.sink2.CommittableMessageTypeInfo;
+import 
org.apache.flink.streaming.api.connector.sink2.SupportsPostCommitTopology;
+import 
org.apache.flink.streaming.api.connector.sink2.SupportsPreCommitTopology;
+import org.apache.flink.streaming.api.connector.sink2.SupportsPreWriteTopology;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.data.util.DataFormatConverters;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.Preconditions;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DistributionMode;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.PartitionField;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.SerializableTable;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.FlinkSchemaUtil;
+import org.apache.iceberg.flink.FlinkWriteConf;
+import org.apache.iceberg.flink.FlinkWriteOptions;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.committer.IcebergCommittable;
+import org.apache.iceberg.flink.sink.committer.IcebergCommittableSerializer;
+import org.apache.iceberg.flink.sink.committer.IcebergCommitter;
+import org.apache.iceberg.flink.sink.committer.IcebergWriteAggregator;
+import org.apache.iceberg.flink.sink.committer.WriteResultSerializer;
+import org.apache.iceberg.flink.util.FlinkCompatibilityUtil;
+import org.apache.iceberg.io.WriteResult;
+import 
org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.types.TypeUtil;
+import org.apache.iceberg.util.SerializableSupplier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Flink v2 sink offers different hooks to insert custom topologies into the sink. We will use the
+ * following:
+ *
+ * <ul>
+ *   <li>{@link SupportsPreWriteTopology} which redistributes the data to the 
writers based on the
+ *       {@link DistributionMode}
+ *   <li>{@link org.apache.flink.api.connector.sink2.SinkWriter} which writes 
data files, and
+ *       generates a {@link IcebergCommittable} to store the {@link
+ *       org.apache.iceberg.io.WriteResult}
+ *   <li>{@link SupportsPreCommitTopology} which we use to place the {@link
+ *       IcebergWriteAggregator} which merges the individual {@link
+ *       org.apache.flink.api.connector.sink2.SinkWriter}'s {@link
+ *       org.apache.iceberg.io.WriteResult}s to a single {@link
+ *       org.apache.iceberg.flink.sink.DeltaManifests}
+ *   <li>{@link IcebergCommitter} which stores the incoming {@link
+ *       org.apache.iceberg.flink.sink.DeltaManifests}s in state for recovery, 
and commits them to
+ *       the Iceberg table
+ *   <li>{@link SupportsPostCommitTopology} we could use for incremental 
compaction later. This is
+ *       not implemented yet.
+ * </ul>
+ *
+ * The job graph looks like below:
+ *
+ * <pre>{@code
+ *                            Flink sink
+ *               
+-----------------------------------------------------------------------------------+
+ *               |                                                             
                      |
+ * +-------+     | +----------+                               +-------------+  
    +---------------+ |
+ * | Map 1 | ==> | | writer 1 |                               | committer 1 | 
---> | post commit 1 | |
+ * +-------+     | +----------+                               +-------------+  
    +---------------+ |
+ *               |             \                             /                
\                      |
+ *               |              \                           /                  
\                     |
+ *               |               \                         /                   
 \                    |
+ * +-------+     | +----------+   \ +-------------------+ /   +-------------+  
  \ +---------------+ |
+ * | Map 2 | ==> | | writer 2 | --->| commit aggregator |     | committer 2 |  
    | post commit 2 | |
+ * +-------+     | +----------+     +-------------------+     +-------------+  
    +---------------+ |
+ *               |                                             Commit only on  
                      |
+ *               |                                             committer 1     
                      |
+ *               
+-----------------------------------------------------------------------------------+
+ * }</pre>
+ */
+public class IcebergSink
+    implements Sink<RowData>,
+        SupportsPreWriteTopology<RowData>,
+        SupportsCommitter<IcebergCommittable>,
+        SupportsPreCommitTopology<WriteResult, IcebergCommittable>,
+        SupportsPostCommitTopology<IcebergCommittable> {
+  private Table initTable;
+  private final TableLoader tableLoader;
+  private final Map<String, String> snapshotProperties;
+  private String uidPrefix;
+  private Committer<IcebergCommittable> sinkCommitter;
+  private final String sinkId;
+  private transient Builder builder;
+  private Map<String, String> writeProperties;
+  private RowType flinkRowType;
+  private SerializableSupplier<Table> tableSupplier;
+  private transient FlinkWriteConf flinkWriteConf;
+  private List<Integer> equalityFieldIds;
+  private boolean upsertMode;
+  private FileFormat dataFileFormat;
+  private long targetDataFileSize;
+  private String branch;
+  private boolean overwriteMode;
+  private int workerPoolSize;
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergSink.class);
+
+  private IcebergSink(Builder builder) {
+    this.builder = builder;
+    this.tableLoader = builder.tableLoader();
+    this.snapshotProperties = builder.snapshotSummary;
+
+    // We generate a random UUID every time when a sink is created.
+    // This is used to separate files generated by different sinks writing the 
same table.
+    // Also used to generate the aggregator operator name
+    this.sinkId = UUID.randomUUID().toString();
+  }
+
+  @Override
+  public SinkWriter<RowData> createWriter(InitContext context) {
+    RowDataTaskWriterFactory taskWriterFactory =
+        new RowDataTaskWriterFactory(
+            tableSupplier,
+            flinkRowType,
+            targetDataFileSize,
+            dataFileFormat,
+            writeProperties,
+            equalityFieldIds,
+            upsertMode);
+    IcebergStreamWriterMetrics metrics =
+        new IcebergStreamWriterMetrics(context.metricGroup(), 
initTable.name());
+    return new IcebergSinkWriter(
+        tableSupplier,
+        taskWriterFactory,
+        metrics,
+        context.getSubtaskId(),
+        context.getAttemptNumber());
+  }
+
+  @Override
+  public Committer<IcebergCommittable> createCommitter(CommitterInitContext 
context) {
+    IcebergFilesCommitterMetrics metrics =
+        new IcebergFilesCommitterMetrics(context.metricGroup(), 
initTable.name());
+    return new IcebergCommitter(
+        tableLoader, branch, snapshotProperties, overwriteMode, 
workerPoolSize, sinkId, metrics);
+  }
+
+  @Override
+  public SimpleVersionedSerializer<IcebergCommittable> 
getCommittableSerializer() {
+    return new IcebergCommittableSerializer();
+  }
+
+  @Override
+  public void addPostCommitTopology(
+      DataStream<CommittableMessage<IcebergCommittable>> committables) {
+    // TODO Support small file compaction
+  }
+
+  @Override
+  public DataStream<RowData> addPreWriteTopology(DataStream<RowData> 
inputDataStream) {
+    return distributeDataStream(inputDataStream);
+  }
+
+  // global forces all output records send to subtask 0 of the downstream committer operator.

Review Comment:
   It is probably better to keep this comment next to the `.global()` call than in the method Javadoc.
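   
   Roughly what that placement would look like (same topology as in this PR, only the explanatory comment moved inline next to `.global()`):

   ```java
   @Override
   public DataStream<CommittableMessage<IcebergCommittable>> addPreCommitTopology(
       DataStream<CommittableMessage<WriteResult>> writeResults) {
     TypeInformation<CommittableMessage<IcebergCommittable>> typeInformation =
         CommittableMessageTypeInfo.of(this::getCommittableSerializer);

     return writeResults
         // global() forces all records to subtask 0 of the downstream committer operator,
         // so the Iceberg commit happens in exactly one committer subtask. Once Flink can set
         // the committer operator parallelism to 1 directly, this can be removed.
         .global()
         .transform(
             prefixIfNotNull(uidPrefix, initTable.name() + "-" + sinkId + "-pre-commit-topology"),
             typeInformation,
             new IcebergWriteAggregator(tableLoader, sinkId))
         .uid(prefixIfNotNull(uidPrefix, "pre-commit-topology"))
         .setParallelism(1)
         .setMaxParallelism(1)
         .global();
   }
   ```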



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/IcebergSink.java:
##########
@@ -0,0 +1,767 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink;
+
+import static org.apache.iceberg.TableProperties.AVRO_COMPRESSION;
+import static org.apache.iceberg.TableProperties.AVRO_COMPRESSION_LEVEL;
+import static org.apache.iceberg.TableProperties.ORC_COMPRESSION;
+import static org.apache.iceberg.TableProperties.ORC_COMPRESSION_STRATEGY;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_LEVEL;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
+import java.util.function.Function;
+import org.apache.flink.api.common.functions.MapFunction;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.api.connector.sink2.CommitterInitContext;
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.api.connector.sink2.SinkWriter;
+import org.apache.flink.api.connector.sink2.SupportsCommitter;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.streaming.api.connector.sink2.CommittableMessage;
+import 
org.apache.flink.streaming.api.connector.sink2.CommittableMessageTypeInfo;
+import 
org.apache.flink.streaming.api.connector.sink2.SupportsPostCommitTopology;
+import 
org.apache.flink.streaming.api.connector.sink2.SupportsPreCommitTopology;
+import org.apache.flink.streaming.api.connector.sink2.SupportsPreWriteTopology;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.data.util.DataFormatConverters;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.Preconditions;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DistributionMode;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.PartitionField;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.SerializableTable;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.FlinkSchemaUtil;
+import org.apache.iceberg.flink.FlinkWriteConf;
+import org.apache.iceberg.flink.FlinkWriteOptions;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.committer.IcebergCommittable;
+import org.apache.iceberg.flink.sink.committer.IcebergCommittableSerializer;
+import org.apache.iceberg.flink.sink.committer.IcebergCommitter;
+import org.apache.iceberg.flink.sink.committer.IcebergWriteAggregator;
+import org.apache.iceberg.flink.sink.committer.WriteResultSerializer;
+import org.apache.iceberg.flink.util.FlinkCompatibilityUtil;
+import org.apache.iceberg.io.WriteResult;
+import 
org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.types.TypeUtil;
+import org.apache.iceberg.util.SerializableSupplier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Flink v2 sink offers different hooks to insert custom topologies into the sink. We will use the
+ * following:
+ *
+ * <ul>
+ *   <li>{@link SupportsPreWriteTopology} which redistributes the data to the 
writers based on the
+ *       {@link DistributionMode}
+ *   <li>{@link org.apache.flink.api.connector.sink2.SinkWriter} which writes 
data files, and
+ *       generates a {@link IcebergCommittable} to store the {@link
+ *       org.apache.iceberg.io.WriteResult}
+ *   <li>{@link SupportsPreCommitTopology} which we use to place the {@link
+ *       IcebergWriteAggregator} which merges the individual {@link
+ *       org.apache.flink.api.connector.sink2.SinkWriter}'s {@link
+ *       org.apache.iceberg.io.WriteResult}s to a single {@link
+ *       org.apache.iceberg.flink.sink.DeltaManifests}
+ *   <li>{@link IcebergCommitter} which stores the incoming {@link
+ *       org.apache.iceberg.flink.sink.DeltaManifests}s in state for recovery, 
and commits them to
+ *       the Iceberg table
+ *   <li>{@link SupportsPostCommitTopology} we could use for incremental 
compaction later. This is
+ *       not implemented yet.
+ * </ul>
+ *
+ * The job graph looks like below:
+ *
+ * <pre>{@code
+ *                            Flink sink
+ *               
+-----------------------------------------------------------------------------------+
+ *               |                                                             
                      |
+ * +-------+     | +----------+                               +-------------+  
    +---------------+ |
+ * | Map 1 | ==> | | writer 1 |                               | committer 1 | 
---> | post commit 1 | |
+ * +-------+     | +----------+                               +-------------+  
    +---------------+ |
+ *               |             \                             /                
\                      |
+ *               |              \                           /                  
\                     |
+ *               |               \                         /                   
 \                    |
+ * +-------+     | +----------+   \ +-------------------+ /   +-------------+  
  \ +---------------+ |
+ * | Map 2 | ==> | | writer 2 | --->| commit aggregator |     | committer 2 |  
    | post commit 2 | |
+ * +-------+     | +----------+     +-------------------+     +-------------+  
    +---------------+ |
+ *               |                                             Commit only on  
                      |
+ *               |                                             committer 1     
                      |
+ *               
+-----------------------------------------------------------------------------------+
+ * }</pre>
+ */
+public class IcebergSink
+    implements Sink<RowData>,
+        SupportsPreWriteTopology<RowData>,
+        SupportsCommitter<IcebergCommittable>,
+        SupportsPreCommitTopology<WriteResult, IcebergCommittable>,
+        SupportsPostCommitTopology<IcebergCommittable> {
+  private Table initTable;
+  private final TableLoader tableLoader;
+  private final Map<String, String> snapshotProperties;
+  private String uidPrefix;
+  private Committer<IcebergCommittable> sinkCommitter;
+  private final String sinkId;
+  private transient Builder builder;
+  private Map<String, String> writeProperties;
+  private RowType flinkRowType;
+  private SerializableSupplier<Table> tableSupplier;
+  private transient FlinkWriteConf flinkWriteConf;
+  private List<Integer> equalityFieldIds;
+  private boolean upsertMode;
+  private FileFormat dataFileFormat;
+  private long targetDataFileSize;
+  private String branch;
+  private boolean overwriteMode;
+  private int workerPoolSize;
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergSink.class);
+
+  private IcebergSink(Builder builder) {
+    this.builder = builder;
+    this.tableLoader = builder.tableLoader();

Review Comment:
   Curious why some class members are final and set in the constructor here while others are final and set in the builder?



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergCommitter.java:
##########
@@ -0,0 +1,434 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ReplacePartitions;
+import org.apache.iceberg.RowDelta;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotUpdate;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.CommitSummary;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.IcebergFilesCommitterMetrics;
+import org.apache.iceberg.io.WriteResult;
+import 
org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.ThreadPools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the Flink SinkV2 {@link Committer} interface to 
implement the Iceberg
+ * commits. The implementation builds on the following assumptions:
+ *
+ * <ul>
+ *   <li>There is a single {@link IcebergCommittable} for every checkpoint
+ *   <li>There is no late checkpoint - if checkpoint 'x' has been received in one call, then after a
+ *       successful run only checkpoints &gt; x will arrive
+ *   <li>There is no other writer which would generate another commit to the 
same branch with the
+ *       same jobId-operatorId-checkpointId triplet
+ * </ul>
+ */
+public class IcebergCommitter implements Committer<IcebergCommittable>, 
Serializable {
+  private static final long serialVersionUID = 1L;
+  private static final String MAX_COMMITTED_CHECKPOINT_ID = 
"flink.max-committed-checkpoint-id";
+  private static final Logger LOG = 
LoggerFactory.getLogger(IcebergCommitter.class);
+  private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];
+  private static final CommitRequest<IcebergCommittable> EMPTY_COMMIT_REQUEST =
+      new CommitRequest<IcebergCommittable>() {
+        @Override
+        public IcebergCommittable getCommittable() {
+          return new IcebergCommittable(EMPTY_MANIFEST_DATA, "jobId", 
"operatorId", -1L);
+        }
+
+        @Override
+        public int getNumberOfRetries() {
+          return 0;
+        }
+
+        @Override
+        public void signalFailedWithKnownReason(Throwable t) {}
+
+        @Override
+        public void signalFailedWithUnknownReason(Throwable t) {}
+
+        @Override
+        public void retryLater() {}
+
+        @Override
+        public void updateAndRetryLater(IcebergCommittable committable) {}
+
+        @Override
+        public void signalAlreadyCommitted() {}
+      };
+
+  @VisibleForTesting
+  static final String MAX_CONTINUOUS_EMPTY_COMMITS = 
"flink.max-continuous-empty-commits";
+
+  public static final long INITIAL_CHECKPOINT_ID = -1L;
+  public static final String FLINK_JOB_ID = "flink.job-id";
+  public static final String OPERATOR_ID = "flink.operator-id";
+  private final String branch;
+  private final Map<String, String> snapshotProperties;
+  private final boolean replacePartitions;
+  private final int workerPoolSize;
+  private final String prefix;
+  private transient IcebergFilesCommitterMetrics committerMetrics;
+  private transient Table table;
+  private transient int maxContinuousEmptyCommits;
+  private transient ExecutorService workerPool;
+  private transient int continuousEmptyCheckpoints = 0;
+
+  public IcebergCommitter(
+      TableLoader tableLoader,
+      String branch,
+      Map<String, String> snapshotProperties,
+      boolean replacePartitions,
+      int workerPoolSize,
+      String prefix,
+      IcebergFilesCommitterMetrics committerMetrics) {
+    this.branch = branch;
+    this.snapshotProperties = snapshotProperties;
+    this.replacePartitions = replacePartitions;
+    this.workerPoolSize = workerPoolSize;
+    this.prefix = prefix;
+    this.committerMetrics = committerMetrics;
+
+    if (!tableLoader.isOpen()) {
+      tableLoader.open();
+    }
+
+    this.table = tableLoader.loadTable();
+    this.maxContinuousEmptyCommits =
+        PropertyUtil.propertyAsInt(table.properties(), 
MAX_CONTINUOUS_EMPTY_COMMITS, 10);
+    Preconditions.checkArgument(
+        maxContinuousEmptyCommits > 0, MAX_CONTINUOUS_EMPTY_COMMITS + " must 
be positive");
+    this.workerPool =
+        ThreadPools.newWorkerPool("iceberg-worker-pool-" + table + "-" + 
prefix, workerPoolSize);
+    this.continuousEmptyCheckpoints = 0;
+  }
+
+  @Override
+  public void commit(Collection<CommitRequest<IcebergCommittable>> 
commitRequests)
+      throws IOException, InterruptedException {
+    NavigableMap<Long, byte[]> manifestMap = Maps.newTreeMap();
+    NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap = 
Maps.newTreeMap();
+
+    if (commitRequests.isEmpty()) {
+      commitRequests.add(EMPTY_COMMIT_REQUEST);
+    }
+
+    for (CommitRequest<IcebergCommittable> request : commitRequests) {
+      manifestMap.put(request.getCommittable().checkpointId(), 
request.getCommittable().manifest());
+      commitRequestMap.put(request.getCommittable().checkpointId(), request);
+    }
+
+    long maxCommittedCheckpointId = 
getMaxCommittedCheckpointId(commitRequestMap);
+
+    // Mark the already committed FilesCommittable(s) as finished
+    commitRequestMap
+        .headMap(maxCommittedCheckpointId, true)
+        .values()
+        .forEach(CommitRequest::signalAlreadyCommitted);
+
+    // Commit the remaining
+    IcebergCommittable last = 
commitRequestMap.lastEntry().getValue().getCommittable();
+
+    NavigableMap<Long, byte[]> uncommitted =
+        Maps.newTreeMap(manifestMap).tailMap(maxCommittedCheckpointId, false);
+    if (!uncommitted.isEmpty()) {
+      commitUpToCheckpoint(
+          commitRequestMap, uncommitted, last.jobId(), last.operatorId(), 
last.checkpointId());

Review Comment:
   Let's also consider another scenario where the commit requests may have accumulated over multiple jobs (after checkpoint and restart). What should be the expected behavior?
   
   Also noticed that in the v1 case multiple committables can be collapsed into a single commit to Iceberg, while in v2 they are committed separately and sequentially.
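   
   For the second point, a rough sketch of what v1-style collapsing could look like in this class for the append-only case (names and helpers follow the surrounding code, but this is an illustration, not the PR's current behavior; delete files would need the equivalent RowDelta handling):

   ```java
   // Deserialize every uncommitted DeltaManifests, pool the data files, and issue one
   // Iceberg commit for the whole checkpoint range instead of one commit per checkpoint.
   private void commitAllAsSingleAppend(
       NavigableMap<Long, byte[]> uncommitted, String jobId, String operatorId, long maxCheckpointId)
       throws IOException {
     List<WriteResult> pendingResults = Lists.newArrayList();
     for (byte[] serialized : uncommitted.values()) {
       DeltaManifests deltaManifests =
           SimpleVersionedSerialization.readVersionAndDeSerialize(
               DeltaManifestsSerializer.INSTANCE, serialized);
       pendingResults.add(FlinkManifestUtil.readCompletedFiles(deltaManifests, table.io()));
     }

     AppendFiles appendFiles = table.newAppend();
     for (WriteResult result : pendingResults) {
       Arrays.stream(result.dataFiles()).forEach(appendFiles::appendFile);
     }

     // Record the highest checkpoint id so that recovery can skip everything <= maxCheckpointId.
     appendFiles.set(MAX_COMMITTED_CHECKPOINT_ID, Long.toString(maxCheckpointId));
     appendFiles.set(FLINK_JOB_ID, jobId);
     appendFiles.set(OPERATOR_ID, operatorId);
     appendFiles.toBranch(branch).commit();
   }
   ```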



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergCommitter.java:
##########
@@ -0,0 +1,434 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ReplacePartitions;
+import org.apache.iceberg.RowDelta;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotUpdate;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.CommitSummary;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.IcebergFilesCommitterMetrics;
+import org.apache.iceberg.io.WriteResult;
+import 
org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.ThreadPools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the Flink SinkV2 {@link Committer} interface to 
implement the Iceberg
+ * commits. The implementation builds on the following assumptions:
+ *
+ * <ul>
+ *   <li>There is a single {@link IcebergCommittable} for every checkpoint
+ *   <li>There is no late checkpoint - if checkpoint 'x' has been received in one call, then after a
+ *       successful run only checkpoints &gt; x will arrive
+ *   <li>There is no other writer which would generate another commit to the 
same branch with the
+ *       same jobId-operatorId-checkpointId triplet
+ * </ul>
+ */
+public class IcebergCommitter implements Committer<IcebergCommittable>, 
Serializable {
+  private static final long serialVersionUID = 1L;
+  private static final String MAX_COMMITTED_CHECKPOINT_ID = 
"flink.max-committed-checkpoint-id";
+  private static final Logger LOG = 
LoggerFactory.getLogger(IcebergCommitter.class);
+  private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];
+  private static final CommitRequest<IcebergCommittable> EMPTY_COMMIT_REQUEST =
+      new CommitRequest<IcebergCommittable>() {
+        @Override
+        public IcebergCommittable getCommittable() {
+          return new IcebergCommittable(EMPTY_MANIFEST_DATA, "jobId", 
"operatorId", -1L);
+        }
+
+        @Override
+        public int getNumberOfRetries() {
+          return 0;
+        }
+
+        @Override
+        public void signalFailedWithKnownReason(Throwable t) {}
+
+        @Override
+        public void signalFailedWithUnknownReason(Throwable t) {}
+
+        @Override
+        public void retryLater() {}
+
+        @Override
+        public void updateAndRetryLater(IcebergCommittable committable) {}
+
+        @Override
+        public void signalAlreadyCommitted() {}
+      };
+
+  @VisibleForTesting
+  static final String MAX_CONTINUOUS_EMPTY_COMMITS = 
"flink.max-continuous-empty-commits";
+
+  public static final long INITIAL_CHECKPOINT_ID = -1L;
+  public static final String FLINK_JOB_ID = "flink.job-id";
+  public static final String OPERATOR_ID = "flink.operator-id";
+  private final String branch;
+  private final Map<String, String> snapshotProperties;
+  private final boolean replacePartitions;
+  private final int workerPoolSize;
+  private final String prefix;
+  private transient IcebergFilesCommitterMetrics committerMetrics;
+  private transient Table table;
+  private transient int maxContinuousEmptyCommits;
+  private transient ExecutorService workerPool;
+  private transient int continuousEmptyCheckpoints = 0;
+
+  public IcebergCommitter(
+      TableLoader tableLoader,
+      String branch,
+      Map<String, String> snapshotProperties,
+      boolean replacePartitions,
+      int workerPoolSize,
+      String prefix,
+      IcebergFilesCommitterMetrics committerMetrics) {
+    this.branch = branch;
+    this.snapshotProperties = snapshotProperties;
+    this.replacePartitions = replacePartitions;
+    this.workerPoolSize = workerPoolSize;
+    this.prefix = prefix;
+    this.committerMetrics = committerMetrics;
+
+    if (!tableLoader.isOpen()) {
+      tableLoader.open();
+    }
+
+    this.table = tableLoader.loadTable();
+    this.maxContinuousEmptyCommits =
+        PropertyUtil.propertyAsInt(table.properties(), 
MAX_CONTINUOUS_EMPTY_COMMITS, 10);
+    Preconditions.checkArgument(
+        maxContinuousEmptyCommits > 0, MAX_CONTINUOUS_EMPTY_COMMITS + " must 
be positive");
+    this.workerPool =
+        ThreadPools.newWorkerPool("iceberg-worker-pool-" + table + "-" + 
prefix, workerPoolSize);
+    this.continuousEmptyCheckpoints = 0;
+  }
+
+  @Override
+  public void commit(Collection<CommitRequest<IcebergCommittable>> 
commitRequests)
+      throws IOException, InterruptedException {
+    NavigableMap<Long, byte[]> manifestMap = Maps.newTreeMap();
+    NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap = 
Maps.newTreeMap();
+
+    if (commitRequests.isEmpty()) {
+      commitRequests.add(EMPTY_COMMIT_REQUEST);

Review Comment:
   Why do we add an empty commit request in this case? Can the method simply return here?
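   
   The alternative being asked about would look something like the sketch below; the guard runs before the `lastEntry()` lookup that the placeholder request currently protects, so the rest of the method would be untouched (illustrative, not the PR's code):

   ```java
   @Override
   public void commit(Collection<CommitRequest<IcebergCommittable>> commitRequests)
       throws IOException, InterruptedException {
     if (commitRequests.isEmpty()) {
       // Nothing arrived for this checkpoint, so skip the snapshot-summary lookup entirely.
       return;
     }
     // ... the existing per-checkpoint commit logic from this PR continues unchanged ...
   }
   ```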



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/IcebergSink.java:
##########
@@ -0,0 +1,767 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink;
+
+import static org.apache.iceberg.TableProperties.AVRO_COMPRESSION;
+import static org.apache.iceberg.TableProperties.AVRO_COMPRESSION_LEVEL;
+import static org.apache.iceberg.TableProperties.ORC_COMPRESSION;
+import static org.apache.iceberg.TableProperties.ORC_COMPRESSION_STRATEGY;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION;
+import static org.apache.iceberg.TableProperties.PARQUET_COMPRESSION_LEVEL;
+import static org.apache.iceberg.TableProperties.WRITE_DISTRIBUTION_MODE;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
+import java.util.function.Function;
+import org.apache.flink.api.common.functions.MapFunction;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.api.connector.sink2.CommitterInitContext;
+import org.apache.flink.api.connector.sink2.Sink;
+import org.apache.flink.api.connector.sink2.SinkWriter;
+import org.apache.flink.api.connector.sink2.SupportsCommitter;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.configuration.ReadableConfig;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.streaming.api.connector.sink2.CommittableMessage;
+import 
org.apache.flink.streaming.api.connector.sink2.CommittableMessageTypeInfo;
+import 
org.apache.flink.streaming.api.connector.sink2.SupportsPostCommitTopology;
+import 
org.apache.flink.streaming.api.connector.sink2.SupportsPreCommitTopology;
+import org.apache.flink.streaming.api.connector.sink2.SupportsPreWriteTopology;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.DataStreamSink;
+import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
+import org.apache.flink.table.api.TableSchema;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.data.util.DataFormatConverters;
+import org.apache.flink.table.types.DataType;
+import org.apache.flink.table.types.logical.RowType;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.Preconditions;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DistributionMode;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.PartitionField;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.SerializableTable;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.FlinkSchemaUtil;
+import org.apache.iceberg.flink.FlinkWriteConf;
+import org.apache.iceberg.flink.FlinkWriteOptions;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.committer.IcebergCommittable;
+import org.apache.iceberg.flink.sink.committer.IcebergCommittableSerializer;
+import org.apache.iceberg.flink.sink.committer.IcebergCommitter;
+import org.apache.iceberg.flink.sink.committer.IcebergWriteAggregator;
+import org.apache.iceberg.flink.sink.committer.WriteResultSerializer;
+import org.apache.iceberg.flink.util.FlinkCompatibilityUtil;
+import org.apache.iceberg.io.WriteResult;
+import 
org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.types.TypeUtil;
+import org.apache.iceberg.util.SerializableSupplier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Flink v2 sink offers different hooks to insert custom topologies into the sink. We will use the
+ * following:
+ *
+ * <ul>
+ *   <li>{@link SupportsPreWriteTopology} which redistributes the data to the 
writers based on the
+ *       {@link DistributionMode}
+ *   <li>{@link org.apache.flink.api.connector.sink2.SinkWriter} which writes 
data files, and
+ *       generates a {@link IcebergCommittable} to store the {@link
+ *       org.apache.iceberg.io.WriteResult}
+ *   <li>{@link SupportsPreCommitTopology} which we use to place the {@link
+ *       IcebergWriteAggregator} which merges the individual {@link
+ *       org.apache.flink.api.connector.sink2.SinkWriter}'s {@link
+ *       org.apache.iceberg.io.WriteResult}s to a single {@link
+ *       org.apache.iceberg.flink.sink.DeltaManifests}
+ *   <li>{@link IcebergCommitter} which stores the incoming {@link
+ *       org.apache.iceberg.flink.sink.DeltaManifests}s in state for recovery, 
and commits them to
+ *       the Iceberg table
+ *   <li>{@link SupportsPostCommitTopology} we could use for incremental 
compaction later. This is
+ *       not implemented yet.
+ * </ul>
+ *
+ * The job graph looks like below:
+ *
+ * <pre>{@code
+ *                            Flink sink
+ *               
+-----------------------------------------------------------------------------------+
+ *               |                                                             
                      |
+ * +-------+     | +----------+                               +-------------+  
    +---------------+ |
+ * | Map 1 | ==> | | writer 1 |                               | committer 1 | 
---> | post commit 1 | |
+ * +-------+     | +----------+                               +-------------+  
    +---------------+ |
+ *               |             \                             /                
\                      |
+ *               |              \                           /                  
\                     |
+ *               |               \                         /                   
 \                    |
+ * +-------+     | +----------+   \ +-------------------+ /   +-------------+  
  \ +---------------+ |
+ * | Map 2 | ==> | | writer 2 | --->| commit aggregator |     | committer 2 |  
    | post commit 2 | |
+ * +-------+     | +----------+     +-------------------+     +-------------+  
    +---------------+ |
+ *               |                                             Commit only on  
                      |
+ *               |                                             committer 1     
                      |
+ *               
+-----------------------------------------------------------------------------------+
+ * }</pre>
+ */
+public class IcebergSink
+    implements Sink<RowData>,
+        SupportsPreWriteTopology<RowData>,
+        SupportsCommitter<IcebergCommittable>,
+        SupportsPreCommitTopology<WriteResult, IcebergCommittable>,
+        SupportsPostCommitTopology<IcebergCommittable> {
+  private Table initTable;
+  private final TableLoader tableLoader;
+  private final Map<String, String> snapshotProperties;
+  private String uidPrefix;
+  private Committer<IcebergCommittable> sinkCommitter;
+  private final String sinkId;
+  private transient Builder builder;
+  private Map<String, String> writeProperties;
+  private RowType flinkRowType;
+  private SerializableSupplier<Table> tableSupplier;
+  private transient FlinkWriteConf flinkWriteConf;
+  private List<Integer> equalityFieldIds;
+  private boolean upsertMode;
+  private FileFormat dataFileFormat;
+  private long targetDataFileSize;
+  private String branch;
+  private boolean overwriteMode;
+  private int workerPoolSize;
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergSink.class);
+
+  private IcebergSink(Builder builder) {
+    this.builder = builder;
+    this.tableLoader = builder.tableLoader();
+    this.snapshotProperties = builder.snapshotSummary;
+
+    // We generate a random UUID every time when a sink is created.
+    // This is used to separate files generated by different sinks writing the 
same table.
+    // Also used to generate the aggregator operator name
+    this.sinkId = UUID.randomUUID().toString();
+  }
+
+  @Override
+  public SinkWriter<RowData> createWriter(InitContext context) {
+    RowDataTaskWriterFactory taskWriterFactory =
+        new RowDataTaskWriterFactory(
+            tableSupplier,
+            flinkRowType,
+            targetDataFileSize,
+            dataFileFormat,
+            writeProperties,
+            equalityFieldIds,
+            upsertMode);
+    IcebergStreamWriterMetrics metrics =
+        new IcebergStreamWriterMetrics(context.metricGroup(), 
initTable.name());
+    return new IcebergSinkWriter(
+        tableSupplier,
+        taskWriterFactory,
+        metrics,
+        context.getSubtaskId(),
+        context.getAttemptNumber());
+  }
+
+  @Override
+  public Committer<IcebergCommittable> createCommitter(CommitterInitContext 
context) {
+    IcebergFilesCommitterMetrics metrics =
+        new IcebergFilesCommitterMetrics(context.metricGroup(), 
initTable.name());
+    return new IcebergCommitter(
+        tableLoader, branch, snapshotProperties, overwriteMode, 
workerPoolSize, sinkId, metrics);
+  }
+
+  @Override
+  public SimpleVersionedSerializer<IcebergCommittable> 
getCommittableSerializer() {
+    return new IcebergCommittableSerializer();
+  }
+
+  @Override
+  public void addPostCommitTopology(
+      DataStream<CommittableMessage<IcebergCommittable>> committables) {
+    // TODO Support small file compaction
+  }
+
+  @Override
+  public DataStream<RowData> addPreWriteTopology(DataStream<RowData> 
inputDataStream) {
+    return distributeDataStream(inputDataStream);
+  }
+
+  // global forces all output records send to subtask 0 of the downstream 
committer operator.
+  // This is to ensure commit only happen in one committer subtask.
+  // Once upstream Flink provides the capability of setting committer operator
+  // parallelism to 1, this can be removed.
+  @Override
+  public DataStream<CommittableMessage<IcebergCommittable>> 
addPreCommitTopology(
+      DataStream<CommittableMessage<WriteResult>> writeResults) {
+    TypeInformation<CommittableMessage<IcebergCommittable>> typeInformation =
+        CommittableMessageTypeInfo.of(this::getCommittableSerializer);
+    return writeResults
+        .global()
+        .transform(
+            prefixIfNotNull(uidPrefix, initTable.name() + "-" + sinkId + 
"-pre-commit-topology"),
+            typeInformation,
+            new IcebergWriteAggregator(tableLoader, sinkId))
+        .uid(prefixIfNotNull(uidPrefix, "pre-commit-topology"))
+        .setParallelism(1)
+        .setMaxParallelism(1)
+        .global();
+  }
+
+  @Override
+  public SimpleVersionedSerializer<WriteResult> getWriteResultSerializer() {
+    return new WriteResultSerializer();
+  }
+
+  public static class Builder {
+    private Function<String, DataStream<RowData>> inputCreator = null;
+    private TableLoader tableLoader;
+    private Table table;
+    private TableSchema tableSchema;
+    private List<String> equalityFieldColumns = null;
+    private String uidPrefix = null;
+    private final Map<String, String> snapshotSummary = Maps.newHashMap();
+    private ReadableConfig readableConfig = new Configuration();
+    private final Map<String, String> writeOptions = Maps.newHashMap();
+
+    private Builder() {}
+
+    private Builder forRowData(DataStream<RowData> newRowDataInput) {
+      this.inputCreator = ignored -> newRowDataInput;
+      return this;
+    }
+
+    private Builder forRow(DataStream<Row> input, TableSchema 
inputTableSchema) {
+      RowType rowType = (RowType) 
inputTableSchema.toRowDataType().getLogicalType();
+      DataType[] fieldDataTypes = inputTableSchema.getFieldDataTypes();
+
+      DataFormatConverters.RowConverter rowConverter =
+          new DataFormatConverters.RowConverter(fieldDataTypes);
+      return forMapperOutputType(
+              input, rowConverter::toInternal, 
FlinkCompatibilityUtil.toTypeInfo(rowType))
+          .tableSchema(inputTableSchema);
+    }
+
+    private <T> Builder forMapperOutputType(
+        DataStream<T> input, MapFunction<T, RowData> mapper, 
TypeInformation<RowData> outputType) {
+      this.inputCreator =
+          newUidPrefix -> {
+            // Input stream order is crucial for some situation(e.g. in cdc 
case). Therefore, we
+            // need to set the parallelism of map operator same as its input 
to keep map operator
+            // chaining its input, and avoid rebalanced by default.
+            SingleOutputStreamOperator<RowData> inputStream =
+                input.map(mapper, 
outputType).setParallelism(input.getParallelism());
+            if (newUidPrefix != null) {
+              inputStream.name(operatorName(newUidPrefix)).uid(newUidPrefix + 
"-mapper");
+            }
+            return inputStream;
+          };
+      return this;
+    }
+
+    /**
+     * This iceberg {@link Table} instance is used for initializing {@link 
IcebergStreamWriter}
+     * which will write all the records into {@link DataFile}s and emit them 
to downstream operator.
+     * Providing a table would avoid so many table loading from each separate 
task.
+     *
+     * @param newTable the loaded iceberg table instance.
+     * @return {@link Builder} to connect the iceberg table.
+     */
+    public Builder table(Table newTable) {
+      this.table = newTable;
+      return this;
+    }
+
+    /**
+     * The table loader is used for loading tables in {@link IcebergCommitter} 
lazily, we need this
+     * loader because {@link Table} is not serializable and could not just use 
the loaded table from
+     * Builder#table in the remote task manager.
+     *
+     * @param newTableLoader to load iceberg table inside tasks.
+     * @return {@link Builder} to connect the iceberg table.
+     */
+    public Builder tableLoader(TableLoader newTableLoader) {
+      this.tableLoader = newTableLoader;
+      return this;
+    }
+
+    TableLoader tableLoader() {
+      return tableLoader;
+    }
+
+    /**
+     * Set the write properties for IcebergSink. View the supported properties 
in {@link
+     * FlinkWriteOptions}
+     */
+    public Builder set(String property, String value) {
+      writeOptions.put(property, value);
+      return this;
+    }
+
+    /**
+     * Set the write properties for IcebergSink. View the supported properties 
in {@link
+     * FlinkWriteOptions}
+     */
+    public Builder setAll(Map<String, String> properties) {
+      writeOptions.putAll(properties);
+      return this;
+    }
+
+    public Builder tableSchema(TableSchema newTableSchema) {
+      this.tableSchema = newTableSchema;
+      return this;
+    }
+
+    public Builder overwrite(boolean newOverwrite) {
+      writeOptions.put(FlinkWriteOptions.OVERWRITE_MODE.key(), 
Boolean.toString(newOverwrite));
+      return this;
+    }
+
+    public Builder flinkConf(ReadableConfig config) {
+      this.readableConfig = config;
+      return this;
+    }
+
+    /**
+     * Configure the write {@link DistributionMode} that the IcebergSink will 
use. Currently, flink
+     * support {@link DistributionMode#NONE} and {@link DistributionMode#HASH}.
+     *
+     * @param mode to specify the write distribution mode.
+     * @return {@link Builder} to connect the iceberg table.
+     */
+    public Builder distributionMode(DistributionMode mode) {
+      Preconditions.checkArgument(
+          !DistributionMode.RANGE.equals(mode),
+          "Flink does not support 'range' write distribution mode now.");
+      if (mode != null) {
+        writeOptions.put(FlinkWriteOptions.DISTRIBUTION_MODE.key(), 
mode.modeName());
+      }
+      return this;
+    }
+
+    /**
+     * Configuring the write parallel number for iceberg stream writer.
+     *
+     * @param newWriteParallelism the number of parallel iceberg stream writer.
+     * @return {@link Builder} to connect the iceberg table.
+     */
+    public Builder writeParallelism(int newWriteParallelism) {
+      writeOptions.put(
+          FlinkWriteOptions.WRITE_PARALLELISM.key(), Integer.toString(newWriteParallelism));
+      return this;
+    }
+
+    /**
+     * All INSERT/UPDATE_AFTER events from the input stream will be transformed to UPSERT events,
+     * which means it will DELETE the old records and then INSERT the new records. In a partitioned
+     * table, the partition fields should be a subset of the equality fields, otherwise the old row
+     * located in partition-A could not be deleted by the new row located in partition-B.
+     *
+     * @param enabled indicates whether it should transform all INSERT/UPDATE_AFTER events to UPSERT.
+     * @return {@link Builder} to connect the iceberg table.
+     */
+    public Builder upsert(boolean enabled) {
+      writeOptions.put(FlinkWriteOptions.WRITE_UPSERT_ENABLED.key(), Boolean.toString(enabled));
+      return this;
+    }
+
+    /**
+     * Configuring the equality field columns for an iceberg table that accepts CDC or UPSERT
+     * events.
+     *
+     * @param columns defines the iceberg table's key.
+     * @return {@link Builder} to connect the iceberg table.
+     */
+    public Builder equalityFieldColumns(List<String> columns) {
+      this.equalityFieldColumns = columns;
+      return this;
+    }
+
+    /**
+     * Set the uid prefix for IcebergSink operators. Note that IcebergSink internally consists of
+     * multiple operators (like writer, committer, aggregator). The actual operator uid will be
+     * appended with a suffix like "uidPrefix-writer".
+     *
+     * <p>If provided, this prefix is also applied to operator names.
+     *
+     * <p>Flink auto generates operator uid if not set explicitly. It is a recommended <a
+     * href="https://ci.apache.org/projects/flink/flink-docs-master/docs/ops/production_ready/">
+     * best-practice to set uid for all operators</a> before deploying to production. Flink has an
+     * option {@code pipeline.auto-generate-uid=false} to disable auto-generation and force
+     * explicit setting of all operator uid.
+     *
+     * <p>Be careful with setting this for an existing job, because now we are changing the
+     * operator uid from an auto-generated one to this new value. When deploying the change with a
+     * checkpoint, Flink won't be able to restore the previous IcebergSink operator state (more
+     * specifically the committer operator state). You need to use {@code --allowNonRestoredState}
+     * to ignore the previous sink state. During restore, IcebergSink state is used to check if the
+     * last commit was actually successful or not. {@code --allowNonRestoredState} can lead to data
+     * loss if the Iceberg commit failed in the last completed checkpoint.
+     *
+     * @param newPrefix prefix for Flink sink operator uid and name
+     * @return {@link Builder} to connect the iceberg table.
+     */
+    public Builder uidPrefix(String newPrefix) {
+      this.uidPrefix = newPrefix;
+      return this;
+    }
+
+    public Builder snapshotProperties(Map<String, String> properties) {
+      snapshotSummary.putAll(properties);
+      return this;
+    }
+
+    public Builder setSnapshotProperty(String property, String value) {
+      snapshotSummary.put(property, value);
+      return this;
+    }
+
+    public Builder toBranch(String branch) {
+      writeOptions.put(FlinkWriteOptions.BRANCH.key(), branch);
+      return this;
+    }
+
+    private String operatorName(String suffix) {
+      return uidPrefix != null ? uidPrefix + "-" + suffix : suffix;
+    }
+
+    @VisibleForTesting
+    List<Integer> checkAndGetEqualityFieldIds() {
+      List<Integer> equalityFieldIds = Lists.newArrayList(table.schema().identifierFieldIds());
+      if (equalityFieldColumns != null && !equalityFieldColumns.isEmpty()) {
+        Set<Integer> equalityFieldSet =
+            Sets.newHashSetWithExpectedSize(equalityFieldColumns.size());
+        for (String column : equalityFieldColumns) {
+          org.apache.iceberg.types.Types.NestedField field = table.schema().findField(column);
+          Preconditions.checkNotNull(
+              field,
+              "Missing required equality field column '%s' in table schema %s",
+              column,
+              table.schema());
+          equalityFieldSet.add(field.fieldId());
+        }
+
+        if (!equalityFieldSet.equals(table.schema().identifierFieldIds())) {
+          LOG.warn(
+              "The configured equality field column IDs {} are not matched with the schema identifier field IDs"
+                  + " {}, use job specified equality field columns as the equality fields by default.",
+              equalityFieldSet,
+              table.schema().identifierFieldIds());
+        }
+        equalityFieldIds = Lists.newArrayList(equalityFieldSet);
+      }
+      return equalityFieldIds;
+    }
+
+    public IcebergSink build() {
+      IcebergSink sink = new IcebergSink(this);
+      // validations
+      Preconditions.checkArgument(
+          inputCreator != null,
+          "Please use forRowData() or forMapperOutputType() to initialize the input DataStream.");
+      Preconditions.checkNotNull(tableLoader(), "Table loader shouldn't be null");
+
+      // Set the table if it is not yet set in the builder, so we can do the equalityId checks
+      table(checkAndGetTable(tableLoader(), table));
+
+      // Validate the equality fields and partition fields if we enable the upsert mode.
+      sink.equalityFieldIds = checkAndGetEqualityFieldIds();

Review Comment:
   Is this style common, where the builder modifies the target object's state directly? Should those assignments be done in the sink constructor (instead of the Builder.build method)? That way, the class members could be final.
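   For illustration, a minimal, hypothetical sketch of the style this comment suggests (invented names, not the PR's code): build() only constructs, and the constructor derives and assigns everything, so the sink's fields can be declared final.

   ```java
   import java.util.List;

   class ExampleSink {
     // final because it is assigned exactly once, in the constructor
     private final List<Integer> equalityFieldIds;

     private ExampleSink(Builder builder) {
       // validation and derivation happen here instead of in Builder#build()
       this.equalityFieldIds = builder.checkAndGetEqualityFieldIds();
     }

     static class Builder {
       private List<String> equalityFieldColumns;

       Builder equalityFieldColumns(List<String> columns) {
         this.equalityFieldColumns = columns;
         return this;
       }

       List<Integer> checkAndGetEqualityFieldIds() {
         // resolve equalityFieldColumns to field ids against the table schema (omitted in this sketch)
         return List.of();
       }

       ExampleSink build() {
         // build() only constructs; it does not mutate the sink afterwards
         return new ExampleSink(this);
       }
     }
   }
   ```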



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/WriteResultSerializer.java:
##########
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import org.apache.flink.core.io.SimpleVersionedSerializer;
+import org.apache.flink.core.memory.DataInputDeserializer;
+import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
+import org.apache.flink.util.InstantiationUtil;
+import org.apache.iceberg.io.WriteResult;
+
+public class WriteResultSerializer implements SimpleVersionedSerializer<WriteResult> {
+  private static final int VERSION_0 = 0;
+
+  @Override
+  public int getVersion() {
+    return VERSION_0;
+  }
+
+  @Override
+  public byte[] serialize(WriteResult writeResult) throws IOException {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    DataOutputViewStreamWrapper view = new DataOutputViewStreamWrapper(out);
+    byte[] result = InstantiationUtil.serializeObject(writeResult);

Review Comment:
   Why is this resolved without a response?



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergWriteAggregator.java:
##########
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.function.Supplier;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.flink.streaming.api.connector.sink2.CommittableMessage;
+import org.apache.flink.streaming.api.connector.sink2.CommittableSummary;
+import org.apache.flink.streaming.api.connector.sink2.CommittableWithLineage;
+import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
+import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
+import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
+import org.apache.iceberg.DeleteFile;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ManifestFiles;
+import org.apache.iceberg.ManifestWriter;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.ManifestOutputFileFactory;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Operator which aggregates the individual {@link WriteResult} objects to a single {@link
+ * IcebergCommittable} per checkpoint (storing the serialized {@link DeltaManifests}, jobId,
+ * operatorId, checkpointId)
+ */
+public class IcebergWriteAggregator
+    extends AbstractStreamOperator<CommittableMessage<IcebergCommittable>>
+    implements OneInputStreamOperator<
+        CommittableMessage<WriteResult>, CommittableMessage<IcebergCommittable>> {
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergWriteAggregator.class);
+  private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];
+  private final Collection<WriteResult> results;
+  private transient ManifestOutputFileFactory icebergManifestOutputFileFactory;
+  private transient Table table;
+  private final TableLoader tableLoader;
+  private final String prefix;
+  private static final int FORMAT_V2 = 2;
+  private static final Long DUMMY_SNAPSHOT_ID = 0L;
+
+  public IcebergWriteAggregator(TableLoader tableLoader, String prefix) {
+    this.results = Sets.newHashSet();
+    this.tableLoader = tableLoader;
+    this.prefix = prefix;
+  }
+
+  @Override
+  public void open() throws Exception {
+    if (!tableLoader.isOpen()) {
+      tableLoader.open();
+
+      this.table = tableLoader.loadTable();
+      this.icebergManifestOutputFileFactory =
+          FlinkManifestUtil.createOutputFileFactory(() -> table, table.properties(), prefix);
+    }
+  }
+
+  @Override
+  public void finish() throws IOException {
+    prepareSnapshotPreBarrier(Long.MAX_VALUE);
+  }
+
+  @Override
+  public void prepareSnapshotPreBarrier(long checkpointId) throws IOException {
+    IcebergCommittable committable =
+        new IcebergCommittable(
+            writeToManifest(results, checkpointId),
+            getContainingTask().getEnvironment().getJobID().toString(),
+            getRuntimeContext().getOperatorUniqueID(),
+            checkpointId);
+    CommittableMessage<IcebergCommittable> message =
+        new CommittableWithLineage<>(committable, checkpointId, 0);
+    CommittableMessage<IcebergCommittable> summary =
+        new CommittableSummary<>(0, 1, checkpointId, 1, 1, 0);
+    output.collect(new StreamRecord<>(summary));
+    output.collect(new StreamRecord<>(message));
+    LOG.info("Aggregated commit message emitted {}", message);
+    results.clear();
+  }
+
+  /**
+   * Write all the completed data files to a newly created manifest file and return the manifest's
+   * avro serialized bytes.
+   */
+  public byte[] writeToManifest(Collection<WriteResult> writeResults, long checkpointId)
+      throws IOException {
+    if (writeResults.isEmpty()) {
+      return EMPTY_MANIFEST_DATA;
+    }
+
+    WriteResult result = WriteResult.builder().addAll(writeResults).build();
+    DeltaManifests deltaManifests =
+        writeCompletedFiles(
+            result, () -> icebergManifestOutputFileFactory.create(checkpointId), table.spec());
+
+    return SimpleVersionedSerialization.writeVersionAndSerialize(
+        DeltaManifestsSerializer.INSTANCE, deltaManifests);
+  }
+
+  /**
+   * Write the {@link WriteResult} to temporary manifest files.
+   *
+   * @param result all those DataFiles/DeleteFiles in this WriteResult should be written with same
+   *     partition spec
+   */
+  public static DeltaManifests writeCompletedFiles(
+      WriteResult result, Supplier<OutputFile> outputFileSupplier, PartitionSpec spec)
+      throws IOException {
+
+    ManifestFile dataManifest = null;
+    ManifestFile deleteManifest = null;
+
+    // Write the completed data files into a newly created data manifest file.
+    if (result.dataFiles() != null && result.dataFiles().length > 0) {
+      dataManifest =
+          FlinkManifestUtil.writeDataFiles(
+              outputFileSupplier.get(), spec, Lists.newArrayList(result.dataFiles()));
+    }
+
+    // Write the completed delete files into a newly created delete manifest file.
+    if (result.deleteFiles() != null && result.deleteFiles().length > 0) {
+      OutputFile deleteManifestFile = outputFileSupplier.get();
+
+      ManifestWriter<DeleteFile> deleteManifestWriter =
+          ManifestFiles.writeDeleteManifest(FORMAT_V2, spec, deleteManifestFile, DUMMY_SNAPSHOT_ID);
+      try (ManifestWriter<DeleteFile> writer = deleteManifestWriter) {
+        for (DeleteFile deleteFile : result.deleteFiles()) {
+          writer.add(deleteFile);
+        }
+      }
+
+      deleteManifest = deleteManifestWriter.toManifestFile();
+    }
+
+    return new DeltaManifests(dataManifest, deleteManifest, result.referencedDataFiles());
+  }
+
+  @Override
+  public void processElement(StreamRecord<CommittableMessage<WriteResult>> element)
+      throws Exception {
+    if (element.isRecord() && element.getValue() instanceof CommittableWithLineage) {

Review Comment:
   I think this check `element.getValue() instanceof CommittableWithLineage` should probably be moved inside the if block as a `Preconditions` check. Otherwise, if the upstream changes, we could fail silently here.
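   A rough sketch of that suggestion (illustrative only; the actual method body is truncated in the diff above):

   ```java
   @Override
   public void processElement(StreamRecord<CommittableMessage<WriteResult>> element)
       throws Exception {
     if (element.isRecord()) {
       // fail fast on an unexpected message type instead of silently dropping it
       Preconditions.checkArgument(
           element.getValue() instanceof CommittableWithLineage,
           "Unexpected committable message type: %s",
           element.getValue().getClass().getName());
       // ... aggregate the WriteResult as the existing implementation does ...
     }
   }
   ```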



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergCommitter.java:
##########
@@ -0,0 +1,434 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ReplacePartitions;
+import org.apache.iceberg.RowDelta;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotUpdate;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.CommitSummary;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.IcebergFilesCommitterMetrics;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.ThreadPools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the Flink SinkV2 {@link Committer} interface to implement the Iceberg
+ * commits. The implementation builds on the following assumptions:
+ *
+ * <ul>
+ *   <li>There is a single {@link IcebergCommittable} for every checkpoint
+ *   <li>There is no late checkpoint - if checkpoint 'x' has received in one call, then after a
+ *       successful run only checkpoints &gt; x will arrive
+ *   <li>There is no other writer which would generate another commit to the same branch with the
+ *       same jobId-operatorId-checkpointId triplet
+ * </ul>
+ */
+public class IcebergCommitter implements Committer<IcebergCommittable>, Serializable {
+  private static final long serialVersionUID = 1L;
+  private static final String MAX_COMMITTED_CHECKPOINT_ID = "flink.max-committed-checkpoint-id";
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergCommitter.class);
+  private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];
+  private static final CommitRequest<IcebergCommittable> EMPTY_COMMIT_REQUEST =
+      new CommitRequest<IcebergCommittable>() {
+        @Override
+        public IcebergCommittable getCommittable() {
+          return new IcebergCommittable(EMPTY_MANIFEST_DATA, "jobId", "operatorId", -1L);
+        }
+
+        @Override
+        public int getNumberOfRetries() {
+          return 0;
+        }
+
+        @Override
+        public void signalFailedWithKnownReason(Throwable t) {}
+
+        @Override
+        public void signalFailedWithUnknownReason(Throwable t) {}
+
+        @Override
+        public void retryLater() {}
+
+        @Override
+        public void updateAndRetryLater(IcebergCommittable committable) {}
+
+        @Override
+        public void signalAlreadyCommitted() {}
+      };
+
+  @VisibleForTesting
+  static final String MAX_CONTINUOUS_EMPTY_COMMITS = "flink.max-continuous-empty-commits";
+
+  public static final long INITIAL_CHECKPOINT_ID = -1L;

Review Comment:
   Should we use Flink's `CheckpointStoreUtil#INVALID_CHECKPOINT_ID` instead?

   `INITIAL` is also not accurate here. Technically, the initial checkpoint id is `1` as defined in the Flink code:
   (screenshot: https://github.com/apache/iceberg/assets/1545663/bd5037ec-1138-415f-bd65-a4e4be0159d0)
   



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergCommitter.java:
##########
@@ -0,0 +1,434 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ReplacePartitions;
+import org.apache.iceberg.RowDelta;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotUpdate;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.CommitSummary;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.IcebergFilesCommitterMetrics;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.ThreadPools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the Flink SinkV2 {@link Committer} interface to implement the Iceberg
+ * commits. The implementation builds on the following assumptions:
+ *
+ * <ul>
+ *   <li>There is a single {@link IcebergCommittable} for every checkpoint
+ *   <li>There is no late checkpoint - if checkpoint 'x' has received in one call, then after a
+ *       successful run only checkpoints &gt; x will arrive
+ *   <li>There is no other writer which would generate another commit to the same branch with the
+ *       same jobId-operatorId-checkpointId triplet
+ * </ul>
+ */
+public class IcebergCommitter implements Committer<IcebergCommittable>, Serializable {
+  private static final long serialVersionUID = 1L;
+  private static final String MAX_COMMITTED_CHECKPOINT_ID = "flink.max-committed-checkpoint-id";
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergCommitter.class);
+  private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];
+  private static final CommitRequest<IcebergCommittable> EMPTY_COMMIT_REQUEST =
+      new CommitRequest<IcebergCommittable>() {
+        @Override
+        public IcebergCommittable getCommittable() {
+          return new IcebergCommittable(EMPTY_MANIFEST_DATA, "jobId", "operatorId", -1L);
+        }
+
+        @Override
+        public int getNumberOfRetries() {
+          return 0;
+        }
+
+        @Override
+        public void signalFailedWithKnownReason(Throwable t) {}
+
+        @Override
+        public void signalFailedWithUnknownReason(Throwable t) {}
+
+        @Override
+        public void retryLater() {}
+
+        @Override
+        public void updateAndRetryLater(IcebergCommittable committable) {}
+
+        @Override
+        public void signalAlreadyCommitted() {}
+      };
+
+  @VisibleForTesting
+  static final String MAX_CONTINUOUS_EMPTY_COMMITS = "flink.max-continuous-empty-commits";
+
+  public static final long INITIAL_CHECKPOINT_ID = -1L;
+  public static final String FLINK_JOB_ID = "flink.job-id";
+  public static final String OPERATOR_ID = "flink.operator-id";
+  private final String branch;
+  private final Map<String, String> snapshotProperties;
+  private final boolean replacePartitions;
+  private final int workerPoolSize;
+  private final String prefix;
+  private transient IcebergFilesCommitterMetrics committerMetrics;
+  private transient Table table;
+  private transient int maxContinuousEmptyCommits;
+  private transient ExecutorService workerPool;
+  private transient int continuousEmptyCheckpoints = 0;
+
+  public IcebergCommitter(
+      TableLoader tableLoader,
+      String branch,
+      Map<String, String> snapshotProperties,
+      boolean replacePartitions,
+      int workerPoolSize,
+      String prefix,
+      IcebergFilesCommitterMetrics committerMetrics) {
+    this.branch = branch;
+    this.snapshotProperties = snapshotProperties;
+    this.replacePartitions = replacePartitions;
+    this.workerPoolSize = workerPoolSize;
+    this.prefix = prefix;
+    this.committerMetrics = committerMetrics;
+
+    if (!tableLoader.isOpen()) {
+      tableLoader.open();
+    }
+
+    this.table = tableLoader.loadTable();
+    this.maxContinuousEmptyCommits =
+        PropertyUtil.propertyAsInt(table.properties(), MAX_CONTINUOUS_EMPTY_COMMITS, 10);
+    Preconditions.checkArgument(
+        maxContinuousEmptyCommits > 0, MAX_CONTINUOUS_EMPTY_COMMITS + " must be positive");
+    this.workerPool =
+        ThreadPools.newWorkerPool("iceberg-worker-pool-" + table + "-" + prefix, workerPoolSize);
+    this.continuousEmptyCheckpoints = 0;
+  }
+
+  @Override
+  public void commit(Collection<CommitRequest<IcebergCommittable>> commitRequests)
+      throws IOException, InterruptedException {
+    NavigableMap<Long, byte[]> manifestMap = Maps.newTreeMap();
+    NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap = Maps.newTreeMap();
+
+    if (commitRequests.isEmpty()) {
+      commitRequests.add(EMPTY_COMMIT_REQUEST);
+    }
+
+    for (CommitRequest<IcebergCommittable> request : commitRequests) {
+      manifestMap.put(request.getCommittable().checkpointId(), request.getCommittable().manifest());
+      commitRequestMap.put(request.getCommittable().checkpointId(), request);
+    }
+
+    long maxCommittedCheckpointId = getMaxCommittedCheckpointId(commitRequestMap);
+
+    // Mark the already committed FilesCommittable(s) as finished
+    commitRequestMap
+        .headMap(maxCommittedCheckpointId, true)
+        .values()
+        .forEach(CommitRequest::signalAlreadyCommitted);
+
+    // Commit the remaining
+    IcebergCommittable last = commitRequestMap.lastEntry().getValue().getCommittable();
+
+    NavigableMap<Long, byte[]> uncommitted =
+        Maps.newTreeMap(manifestMap).tailMap(maxCommittedCheckpointId, false);
+    if (!uncommitted.isEmpty()) {
+      commitUpToCheckpoint(
+          commitRequestMap, uncommitted, last.jobId(), last.operatorId(), last.checkpointId());

Review Comment:
   I am wondering whether it is correct to use the `jobId` and `operatorId` from the committable, or whether it should be the currently running job's. Imagine the scenario where the committable was restored from a checkpoint/savepoint taken by a previous job.



##########
flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/committer/IcebergCommitter.java:
##########
@@ -0,0 +1,434 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.flink.sink.committer;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.flink.api.connector.sink2.Committer;
+import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.ManifestFile;
+import org.apache.iceberg.ReplacePartitions;
+import org.apache.iceberg.RowDelta;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.SnapshotUpdate;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.CommitSummary;
+import org.apache.iceberg.flink.sink.DeltaManifests;
+import org.apache.iceberg.flink.sink.DeltaManifestsSerializer;
+import org.apache.iceberg.flink.sink.FlinkManifestUtil;
+import org.apache.iceberg.flink.sink.IcebergFilesCommitterMetrics;
+import org.apache.iceberg.io.WriteResult;
+import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting;
+import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.ThreadPools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements the Flink SinkV2 {@link Committer} interface to implement the Iceberg
+ * commits. The implementation builds on the following assumptions:
+ *
+ * <ul>
+ *   <li>There is a single {@link IcebergCommittable} for every checkpoint
+ *   <li>There is no late checkpoint - if checkpoint 'x' has received in one call, then after a
+ *       successful run only checkpoints &gt; x will arrive
+ *   <li>There is no other writer which would generate another commit to the same branch with the
+ *       same jobId-operatorId-checkpointId triplet
+ * </ul>
+ */
+public class IcebergCommitter implements Committer<IcebergCommittable>, Serializable {
+  private static final long serialVersionUID = 1L;
+  private static final String MAX_COMMITTED_CHECKPOINT_ID = "flink.max-committed-checkpoint-id";
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergCommitter.class);
+  private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];
+  private static final CommitRequest<IcebergCommittable> EMPTY_COMMIT_REQUEST =
+      new CommitRequest<IcebergCommittable>() {
+        @Override
+        public IcebergCommittable getCommittable() {
+          return new IcebergCommittable(EMPTY_MANIFEST_DATA, "jobId", "operatorId", -1L);
+        }
+
+        @Override
+        public int getNumberOfRetries() {
+          return 0;
+        }
+
+        @Override
+        public void signalFailedWithKnownReason(Throwable t) {}
+
+        @Override
+        public void signalFailedWithUnknownReason(Throwable t) {}
+
+        @Override
+        public void retryLater() {}
+
+        @Override
+        public void updateAndRetryLater(IcebergCommittable committable) {}
+
+        @Override
+        public void signalAlreadyCommitted() {}
+      };
+
+  @VisibleForTesting
+  static final String MAX_CONTINUOUS_EMPTY_COMMITS = "flink.max-continuous-empty-commits";
+
+  public static final long INITIAL_CHECKPOINT_ID = -1L;
+  public static final String FLINK_JOB_ID = "flink.job-id";
+  public static final String OPERATOR_ID = "flink.operator-id";
+  private final String branch;
+  private final Map<String, String> snapshotProperties;
+  private final boolean replacePartitions;
+  private final int workerPoolSize;
+  private final String prefix;
+  private transient IcebergFilesCommitterMetrics committerMetrics;
+  private transient Table table;
+  private transient int maxContinuousEmptyCommits;
+  private transient ExecutorService workerPool;
+  private transient int continuousEmptyCheckpoints = 0;
+
+  public IcebergCommitter(
+      TableLoader tableLoader,
+      String branch,
+      Map<String, String> snapshotProperties,
+      boolean replacePartitions,
+      int workerPoolSize,
+      String prefix,
+      IcebergFilesCommitterMetrics committerMetrics) {
+    this.branch = branch;
+    this.snapshotProperties = snapshotProperties;
+    this.replacePartitions = replacePartitions;
+    this.workerPoolSize = workerPoolSize;
+    this.prefix = prefix;
+    this.committerMetrics = committerMetrics;
+
+    if (!tableLoader.isOpen()) {
+      tableLoader.open();
+    }
+
+    this.table = tableLoader.loadTable();
+    this.maxContinuousEmptyCommits =
+        PropertyUtil.propertyAsInt(table.properties(), MAX_CONTINUOUS_EMPTY_COMMITS, 10);
+    Preconditions.checkArgument(
+        maxContinuousEmptyCommits > 0, MAX_CONTINUOUS_EMPTY_COMMITS + " must be positive");
+    this.workerPool =
+        ThreadPools.newWorkerPool("iceberg-worker-pool-" + table + "-" + prefix, workerPoolSize);
+    this.continuousEmptyCheckpoints = 0;
+  }
+
+  @Override
+  public void commit(Collection<CommitRequest<IcebergCommittable>> commitRequests)
+      throws IOException, InterruptedException {
+    NavigableMap<Long, byte[]> manifestMap = Maps.newTreeMap();
+    NavigableMap<Long, CommitRequest<IcebergCommittable>> commitRequestMap = Maps.newTreeMap();
+
+    if (commitRequests.isEmpty()) {
+      commitRequests.add(EMPTY_COMMIT_REQUEST);
+    }
+
+    for (CommitRequest<IcebergCommittable> request : commitRequests) {
+      manifestMap.put(request.getCommittable().checkpointId(), request.getCommittable().manifest());
+      commitRequestMap.put(request.getCommittable().checkpointId(), request);
+    }
+
+    long maxCommittedCheckpointId = getMaxCommittedCheckpointId(commitRequestMap);
+
+    // Mark the already committed FilesCommittable(s) as finished
+    commitRequestMap
+        .headMap(maxCommittedCheckpointId, true)
+        .values()
+        .forEach(CommitRequest::signalAlreadyCommitted);
+
+    // Commit the remaining
+    IcebergCommittable last = commitRequestMap.lastEntry().getValue().getCommittable();
+
+    NavigableMap<Long, byte[]> uncommitted =
+        Maps.newTreeMap(manifestMap).tailMap(maxCommittedCheckpointId, false);
+    if (!uncommitted.isEmpty()) {
+      commitUpToCheckpoint(
+          commitRequestMap, uncommitted, last.jobId(), last.operatorId(), 
last.checkpointId());

Review Comment:
   Thinking about it again, the current behavior seems correct given the way `getMaxCommittedCheckpointId` is implemented.
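   A hedged sketch of how such a lookup typically works in the Iceberg Flink sink (illustrative; the PR's actual implementation may differ): walk the snapshot ancestry of the target branch and return the checkpoint id recorded in the snapshot summary by the given jobId/operatorId, so a committable restored from a previous job is matched against the snapshots that job actually committed. `FLINK_JOB_ID`, `OPERATOR_ID`, and `MAX_COMMITTED_CHECKPOINT_ID` are the summary keys defined in the class above.

   ```java
   import java.util.Map;
   import org.apache.iceberg.Snapshot;
   import org.apache.iceberg.SnapshotRef;
   import org.apache.iceberg.Table;

   static long getMaxCommittedCheckpointId(
       Table table, String flinkJobId, String operatorId, String branch) {
     // start from the current snapshot of the branch, if any
     SnapshotRef ref = table.refs().get(branch);
     Snapshot snapshot = ref != null ? table.snapshot(ref.snapshotId()) : null;
     long lastCommitted = -1L; // sentinel: nothing committed yet for this job/operator

     // walk the ancestry, newest first, until a matching summary entry is found
     while (snapshot != null) {
       Map<String, String> summary = snapshot.summary();
       if (flinkJobId.equals(summary.get(FLINK_JOB_ID))
           && operatorId.equals(summary.get(OPERATOR_ID))) {
         String value = summary.get(MAX_COMMITTED_CHECKPOINT_ID);
         if (value != null) {
           lastCommitted = Long.parseLong(value);
           break;
         }
       }

       Long parentId = snapshot.parentId();
       snapshot = parentId != null ? table.snapshot(parentId) : null;
     }

     return lastCommitted;
   }
   ```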



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

