ajantha-bhat commented on code in PR #10351:
URL: https://github.com/apache/iceberg/pull/10351#discussion_r1607813508


##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/CatalogUtils.java:
##########
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect;
+
+import java.io.IOException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+import org.apache.iceberg.CatalogUtil;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.common.DynClasses;
+import org.apache.iceberg.common.DynConstructors;
+import org.apache.iceberg.common.DynMethods;
+import org.apache.iceberg.common.DynMethods.BoundMethod;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class CatalogUtils {
+
+  private static final Logger LOG = LoggerFactory.getLogger(CatalogUtils.class.getName());
+  private static final List<String> HADOOP_CONF_FILES =
+      ImmutableList.of("core-site.xml", "hdfs-site.xml", "hive-site.xml");
+
+  static Catalog loadCatalog(IcebergSinkConfig config) {
+    return CatalogUtil.buildIcebergCatalog(
+        config.catalogName(), config.catalogProps(), loadHadoopConfig(config));
+  }
+
+  // use reflection here to avoid requiring Hadoop as a dependency
+  private static Object loadHadoopConfig(IcebergSinkConfig config) {

Review Comment:
   Should this be moved to `org.apache.iceberg.CatalogUtil` so that Java API users who have a Hadoop conf directory can use it as well?
   
   The method could accept a `String hadoopConfDir` and `config.hadoopProps()` instead of the sink config; in that case this class would not be needed.
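   
   A rough sketch of that shape, purely illustrative (the helper name is hypothetical, and a direct `Configuration` reference is used for brevity; the actual code loads it reflectively to keep Hadoop optional):
   ```java
   import java.net.MalformedURLException;
   import java.nio.file.Files;
   import java.nio.file.Path;
   import java.nio.file.Paths;
   import java.util.Map;
   import org.apache.hadoop.conf.Configuration;
   
   // hypothetical CatalogUtil helper, decoupled from the sink config
   static Configuration loadHadoopConfig(String hadoopConfDir, Map<String, String> hadoopProps) {
     Configuration hadoopConf = new Configuration();
     for (String confFile : new String[] {"core-site.xml", "hdfs-site.xml", "hive-site.xml"}) {
       Path confPath = Paths.get(hadoopConfDir, confFile);
       if (Files.exists(confPath)) {
         try {
           // addResource merges the *-site.xml settings into the config
           hadoopConf.addResource(confPath.toUri().toURL());
         } catch (MalformedURLException e) {
           throw new IllegalArgumentException("Invalid Hadoop conf path: " + confPath, e);
         }
       }
     }
     // explicit properties override anything loaded from the conf directory
     hadoopProps.forEach(hadoopConf::set);
     return hadoopConf;
   }
   ```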



##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/IcebergSinkTask.java:
##########
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect;
+
+import java.util.Collection;
+import java.util.Map;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.connect.sink.SinkRecord;
+import org.apache.kafka.connect.sink.SinkTask;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class IcebergSinkTask extends SinkTask {
+
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergSinkTask.class);
+
+  private IcebergSinkConfig config;
+  private Catalog catalog;
+  private Committer committer;
+
+  @Override
+  public String version() {
+    return IcebergSinkConfig.version();
+  }
+
+  @Override
+  public void start(Map<String, String> props) {
+    this.config = new IcebergSinkConfig(props);
+  }
+
+  @Override
+  public void open(Collection<TopicPartition> partitions) {
+    catalog = CatalogUtils.loadCatalog(config);
+    committer = CommitterFactory.createCommitter(config);
+    committer.start(catalog, config, context, partitions);
+  }
+
+  @Override
+  public void close(Collection<TopicPartition> partitions) {
+    close();
+  }
+
+  private void close() {
+    if (committer != null) {
+      committer.stop();
+      committer = null;
+    }
+
+    if (catalog != null) {
+      if (catalog instanceof AutoCloseable) {
+        try {
+          ((AutoCloseable) catalog).close();
+        } catch (Exception e) {
+          LOG.warn("An error occurred closing catalog instance, ignoring...", e);
+        }
+      }
+      catalog = null;
+    }
+  }
+
+  @Override
+  public void put(Collection<SinkRecord> sinkRecords) {
+    if (committer != null) {
+      committer.save(sinkRecords);
+    }

Review Comment:
   Should this throw an exception when the committer is null? Otherwise the producer will assume the records have been put.
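   
   A minimal sketch of the guard being suggested (throwing `ConnectException` is an assumption on my part, not something the PR does):
   ```java
   import org.apache.kafka.connect.errors.ConnectException;
   
   @Override
   public void put(Collection<SinkRecord> sinkRecords) {
     if (committer == null) {
       // fail fast so the framework retries instead of silently acking the batch
       throw new ConnectException("Committer not initialized, cannot process records");
     }
     committer.save(sinkRecords);
   }
   ```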



##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/channel/Coordinator.java:
##########
@@ -0,0 +1,294 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.channel;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.time.Duration;
+import java.time.OffsetDateTime;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DeleteFile;
+import org.apache.iceberg.RowDelta;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.connect.IcebergSinkConfig;
+import org.apache.iceberg.connect.events.CommitComplete;
+import org.apache.iceberg.connect.events.CommitToTable;
+import org.apache.iceberg.connect.events.DataWritten;
+import org.apache.iceberg.connect.events.Event;
+import org.apache.iceberg.connect.events.StartCommit;
+import org.apache.iceberg.connect.events.TableReference;
+import org.apache.iceberg.exceptions.NoSuchTableException;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.util.Tasks;
+import org.apache.iceberg.util.ThreadPools;
+import org.apache.kafka.clients.admin.MemberDescription;
+import org.apache.kafka.connect.sink.SinkTaskContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class Coordinator extends Channel {
+
+  private static final Logger LOG = LoggerFactory.getLogger(Coordinator.class);
+  private static final ObjectMapper MAPPER = new ObjectMapper();
+  private static final String COMMIT_ID_SNAPSHOT_PROP = "kafka.connect.commit-id";
+  private static final String VTTS_SNAPSHOT_PROP = "kafka.connect.vtts";
+  private static final Duration POLL_DURATION = Duration.ofSeconds(1);
+
+  private final Catalog catalog;
+  private final IcebergSinkConfig config;
+  private final int totalPartitionCount;
+  private final String snapshotOffsetsProp;
+  private final ExecutorService exec;
+  private final CommitState commitState;
+
+  Coordinator(
+      Catalog catalog,
+      IcebergSinkConfig config,
+      Collection<MemberDescription> members,
+      KafkaClientFactory clientFactory,
+      SinkTaskContext context) {
+    // pass consumer group ID to which we commit low watermark offsets
+    super("coordinator", config.connectGroupId() + "-coord", config, clientFactory, context);
+
+    this.catalog = catalog;
+    this.config = config;
+    this.totalPartitionCount =
+        members.stream().mapToInt(desc -> desc.assignment().topicPartitions().size()).sum();
+    this.snapshotOffsetsProp =
+        String.format(
+            "kafka.connect.offsets.%s.%s", config.controlTopic(), config.connectGroupId());
+    this.exec = ThreadPools.newWorkerPool("iceberg-committer", config.commitThreads());
+    this.commitState = new CommitState(config);
+  }
+
+  void process() {
+    if (commitState.isCommitIntervalReached()) {
+      // send out begin commit
+      commitState.startNewCommit();
+      Event event =
+          new Event(config.connectGroupId(), new StartCommit(commitState.currentCommitId()));
+      send(event);
+      LOG.info("Commit {} initiated", commitState.currentCommitId());
+    }
+
+    consumeAvailable(POLL_DURATION);
+
+    if (commitState.isCommitTimedOut()) {
+      commit(true);
+    }
+  }
+
+  @Override
+  protected boolean receive(Envelope envelope) {
+    switch (envelope.event().payload().type()) {
+      case DATA_WRITTEN:
+        commitState.addResponse(envelope);
+        return true;
+      case DATA_COMPLETE:
+        commitState.addReady(envelope);
+        if (commitState.isCommitReady(totalPartitionCount)) {
+          commit(false);
+        }
+        return true;
+    }
+    return false;
+  }
+
+  private void commit(boolean partialCommit) {
+    try {
+      doCommit(partialCommit);
+    } catch (Exception e) {
+      LOG.warn("Commit failed, will try again next cycle", e);
+    } finally {
+      commitState.endCurrentCommit();
+    }
+  }
+
+  private void doCommit(boolean partialCommit) {
+    Map<TableReference, List<Envelope>> commitMap = commitState.tableCommitMap();
+
+    String offsetsJson = offsetsJson();
+    OffsetDateTime vtts = commitState.vtts(partialCommit);
+
+    Tasks.foreach(commitMap.entrySet())
+        .executeWith(exec)
+        .stopOnFailure()
+        .run(
+            entry -> {
+              commitToTable(entry.getKey(), entry.getValue(), offsetsJson, vtts);
+            });
+
+    // we should only get here if all tables committed successfully...
+    commitConsumerOffsets();
+    commitState.clearResponses();
+
+    Event event =
+        new Event(config.connectGroupId(), new CommitComplete(commitState.currentCommitId(), vtts));
+    send(event);
+
+    LOG.info(
+        "Commit {} complete, committed to {} table(s), vtts {}",
+        commitState.currentCommitId(),
+        commitMap.size(),
+        vtts);
+  }
+
+  private String offsetsJson() {
+    try {
+      return MAPPER.writeValueAsString(controlTopicOffsets());
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  private void commitToTable(
+      TableReference tableReference,
+      List<Envelope> envelopeList,
+      String offsetsJson,
+      OffsetDateTime vtts) {
+    TableIdentifier tableIdentifier = tableReference.identifier();
+    Table table;
+    try {
+      table = catalog.loadTable(tableIdentifier);
+    } catch (NoSuchTableException e) {
+      LOG.warn("Table not found, skipping commit: {}", tableIdentifier, e);
+      return;
+    }
+
+    String branch = config.tableConfig(tableIdentifier.toString()).commitBranch();
+
+    Map<Integer, Long> committedOffsets = lastCommittedOffsetsForTable(table, branch);
+
+    List<DataWritten> payloads =
+        envelopeList.stream()
+            .filter(
+                envelope -> {
+                  Long minOffset = committedOffsets.get(envelope.partition());
+                  return minOffset == null || envelope.offset() >= minOffset;
+                })
+            .map(envelope -> (DataWritten) envelope.event().payload())
+            .collect(Collectors.toList());
+
+    List<DataFile> dataFiles =
+        payloads.stream()
+            .filter(payload -> payload.dataFiles() != null)
+            .flatMap(payload -> payload.dataFiles().stream())
+            .filter(dataFile -> dataFile.recordCount() > 0)
+            .filter(distinctByKey(dataFile -> dataFile.path().toString()))
+            .collect(Collectors.toList());
+
+    List<DeleteFile> deleteFiles =

Review Comment:
   Since we are only supporting append, do we need this code?
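   
   For illustration, an append-only `commitToTable` tail might reduce to something like this, based on the fields visible above (the null checks on `branch` and `vtts` are assumptions):
   ```java
   if (dataFiles.isEmpty()) {
     LOG.info("Nothing to commit to table {}, skipping", tableIdentifier);
   } else {
     AppendFiles appendOp = table.newAppend();
     if (branch != null) {
       appendOp.toBranch(branch);
     }
     // record the control-topic offsets and commit metadata as snapshot properties
     appendOp.set(snapshotOffsetsProp, offsetsJson);
     appendOp.set(COMMIT_ID_SNAPSHOT_PROP, commitState.currentCommitId().toString());
     if (vtts != null) {
       appendOp.set(VTTS_SNAPSHOT_PROP, vtts.toString());
     }
     dataFiles.forEach(appendOp::appendFile);
     appendOp.commit();
   }
   ```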



##########
kafka-connect/kafka-connect-events/src/test/java/org/apache/iceberg/connect/events/EventTestUtil.java:
##########
@@ -45,7 +45,7 @@ private EventTestUtil() {}
       new Schema(ImmutableList.of(Types.NestedField.required(1, "id", Types.LongType.get())));
 
   static final PartitionSpec SPEC =
-      PartitionSpec.builderFor(SCHEMA).identity("id").withSpecId(1).build();
+      PartitionSpec.builderFor(SCHEMA).identity("id").withSpecId(0).build();

Review Comment:
   Nit: the default spec ID is 0, so there's no need to set it explicitly.
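   
   i.e. simply:
   ```java
   static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA).identity("id").build();
   ```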



##########
kafka-connect/kafka-connect/src/test/java/org/apache/iceberg/connect/channel/CoordinatorTest.java:
##########
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.channel;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.time.OffsetDateTime;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.DataOperations;
+import org.apache.iceberg.DeleteFile;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.connect.events.AvroUtil;
+import org.apache.iceberg.connect.events.CommitComplete;
+import org.apache.iceberg.connect.events.CommitToTable;
+import org.apache.iceberg.connect.events.DataComplete;
+import org.apache.iceberg.connect.events.DataWritten;
+import org.apache.iceberg.connect.events.Event;
+import org.apache.iceberg.connect.events.PayloadType;
+import org.apache.iceberg.connect.events.StartCommit;
+import org.apache.iceberg.connect.events.TableReference;
+import org.apache.iceberg.connect.events.TopicPartitionOffset;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.types.Types.StructType;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.connect.sink.SinkTaskContext;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+public class CoordinatorTest extends ChannelTestBase {
+
+  @Test
+  public void testCommitAppend() {
+    Assertions.assertEquals(0, ImmutableList.copyOf(table.snapshots().iterator()).size());
+
+    OffsetDateTime ts = EventTestUtil.now();
+    UUID commitId =
+        coordinatorTest(ImmutableList.of(EventTestUtil.createDataFile()), ImmutableList.of(), ts);
+    table.refresh();
+
+    assertThat(producer.history()).hasSize(3);
+    assertCommitTable(1, commitId, ts);
+    assertCommitComplete(2, commitId, ts);
+
+    List<Snapshot> snapshots = ImmutableList.copyOf(table.snapshots());
+    Assertions.assertEquals(1, snapshots.size());
+
+    Snapshot snapshot = snapshots.get(0);
+    Assertions.assertEquals(DataOperations.APPEND, snapshot.operation());
+    Assertions.assertEquals(1, ImmutableList.copyOf(snapshot.addedDataFiles(table.io())).size());
+    Assertions.assertEquals(0, ImmutableList.copyOf(snapshot.addedDeleteFiles(table.io())).size());
+
+    Map<String, String> summary = snapshot.summary();
+    Assertions.assertEquals(commitId.toString(), summary.get(COMMIT_ID_SNAPSHOT_PROP));
+    Assertions.assertEquals("{\"0\":3}", summary.get(OFFSETS_SNAPSHOT_PROP));
+    Assertions.assertEquals(ts.toString(), summary.get(VTTS_SNAPSHOT_PROP));
+  }
+
+  @Test
+  public void testCommitDelta() {

Review Comment:
   Maybe the delete file tests can be added once the delete writers feature is in place.



##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/CommitterFactory.java:
##########
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect;
+
+import org.apache.iceberg.connect.channel.CommitterImpl;
+
+public class CommitterFactory {
+  public static Committer createCommitter(IcebergSinkConfig config) {

Review Comment:
   Is `config` needed here?
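   
   If the factory just instantiates `CommitterImpl` (the import above suggests so, though the body is not shown here), the parameter could be dropped:
   ```java
   public class CommitterFactory {
     public static Committer createCommitter() {
       return new CommitterImpl();
     }
   }
   ```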



##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/IcebergSinkConfig.java:
##########
@@ -80,7 +80,6 @@ public class IcebergSinkConfig extends AbstractConfig {
   private static final String TABLES_SCHEMA_CASE_INSENSITIVE_PROP =
       "iceberg.tables.schema-case-insensitive";
   private static final String CONTROL_TOPIC_PROP = "iceberg.control.topic";
-  private static final String CONTROL_GROUP_ID_PROP = "iceberg.control.group-id";

Review Comment:
   Why was this removed?



##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/channel/Coordinator.java:
##########
@@ -0,0 +1,294 @@
+    List<DeleteFile> deleteFiles =

Review Comment:
   Similar comment applies to the `RowDelta` block below.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

