fqaiser94 commented on code in PR #9641:
URL: https://github.com/apache/iceberg/pull/9641#discussion_r1501877753


##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/BaseDeltaTaskWriter.java:
##########
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.data;
+
+import java.io.IOException;
+import java.util.Set;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.PartitionKey;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.data.InternalRecordWrapper;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.io.BaseTaskWriter;
+import org.apache.iceberg.io.FileAppenderFactory;
+import org.apache.iceberg.io.FileIO;
+import org.apache.iceberg.io.OutputFileFactory;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.types.TypeUtil;
+
+abstract class BaseDeltaTaskWriter extends BaseTaskWriter<Record> {
+
+  private final Schema schema;
+  private final Schema deleteSchema;
+  private final InternalRecordWrapper wrapper;
+  private final InternalRecordWrapper keyWrapper;

Review Comment:
   I guess similar to the above, why do we have these wrappers at all?
   I was able to remove these and their usages and was still able to pass the tests in the original tabular repo as well.



##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/RecordConverter.java:
##########
@@ -0,0 +1,508 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.data;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeFormatter;
+import java.time.format.DateTimeFormatterBuilder;
+import java.time.format.DateTimeParseException;
+import java.time.temporal.Temporal;
+import java.util.Base64;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableProperties;
+import org.apache.iceberg.connect.IcebergSinkConfig;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.mapping.MappedField;
+import org.apache.iceberg.mapping.NameMapping;
+import org.apache.iceberg.mapping.NameMappingParser;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.types.Type;
+import org.apache.iceberg.types.Type.PrimitiveType;
+import org.apache.iceberg.types.Types.DecimalType;
+import org.apache.iceberg.types.Types.ListType;
+import org.apache.iceberg.types.Types.MapType;
+import org.apache.iceberg.types.Types.NestedField;
+import org.apache.iceberg.types.Types.StructType;
+import org.apache.iceberg.types.Types.TimestampType;
+import org.apache.iceberg.util.DateTimeUtil;
+import org.apache.kafka.connect.data.Struct;
+
+public class RecordConverter {
+
+  private static final ObjectMapper MAPPER = new ObjectMapper();
+
+  private static final DateTimeFormatter OFFSET_TS_FMT =
+      new DateTimeFormatterBuilder()
+          .append(DateTimeFormatter.ISO_LOCAL_DATE_TIME)
+          .appendOffset("+HHmm", "Z")
+          .toFormatter();
+
+  private final Schema tableSchema;
+  private final NameMapping nameMapping;
+  private final IcebergSinkConfig config;
+  private final Map<Integer, Map<String, NestedField>> structNameMap = Maps.newHashMap();
+
+  public RecordConverter(Table table, IcebergSinkConfig config) {
+    this.tableSchema = table.schema();
+    this.nameMapping = createNameMapping(table);
+    this.config = config;
+  }
+
+  public Record convert(Object data) {
+    return convert(data, null);
+  }
+
+  public Record convert(Object data, SchemaUpdate.Consumer schemaUpdateConsumer) {
+    if (data instanceof Struct || data instanceof Map) {
+      return convertStructValue(data, tableSchema.asStruct(), -1, schemaUpdateConsumer);
+    }
+    throw new UnsupportedOperationException("Cannot convert type: " + 
data.getClass().getName());
+  }
+
+  private NameMapping createNameMapping(Table table) {
+    String nameMappingString = table.properties().get(TableProperties.DEFAULT_NAME_MAPPING);
+    return nameMappingString != null ? NameMappingParser.fromJson(nameMappingString) : null;
+  }
+
+  private Object convertValue(
+      Object value, Type type, int fieldId, SchemaUpdate.Consumer schemaUpdateConsumer) {
+    if (value == null) {
+      return null;
+    }
+    switch (type.typeId()) {
+      case STRUCT:
+        return convertStructValue(value, type.asStructType(), fieldId, schemaUpdateConsumer);
+      case LIST:
+        return convertListValue(value, type.asListType(), schemaUpdateConsumer);
+      case MAP:
+        return convertMapValue(value, type.asMapType(), schemaUpdateConsumer);
+      case INTEGER:
+        return convertInt(value);
+      case LONG:
+        return convertLong(value);
+      case FLOAT:
+        return convertFloat(value);
+      case DOUBLE:
+        return convertDouble(value);
+      case DECIMAL:
+        return convertDecimal(value, (DecimalType) type);
+      case BOOLEAN:
+        return convertBoolean(value);
+      case STRING:
+        return convertString(value);
+      case UUID:
+        return convertUUID(value);
+      case BINARY:
+      case FIXED:
+        return convertBase64Binary(value);
+      case DATE:
+        return convertDateValue(value);
+      case TIME:
+        return convertTimeValue(value);
+      case TIMESTAMP:
+        return convertTimestampValue(value, (TimestampType) type);
+    }
+    throw new UnsupportedOperationException("Unsupported type: " + 
type.typeId());
+  }
+
+  protected GenericRecord convertStructValue(
+      Object value,
+      StructType schema,
+      int parentFieldId,
+      SchemaUpdate.Consumer schemaUpdateConsumer) {
+    if (value instanceof Map) {
+      return convertToStruct((Map<?, ?>) value, schema, parentFieldId, schemaUpdateConsumer);
+    } else if (value instanceof Struct) {
+      return convertToStruct((Struct) value, schema, parentFieldId, schemaUpdateConsumer);
+    }
+    throw new IllegalArgumentException("Cannot convert to struct: " + value.getClass().getName());
+  }
+
+  private GenericRecord convertToStruct(
+      Map<?, ?> map,
+      StructType schema,
+      int structFieldId,
+      SchemaUpdate.Consumer schemaUpdateConsumer) {
+    GenericRecord result = GenericRecord.create(schema);
+    map.forEach(
+        (recordFieldNameObj, recordFieldValue) -> {
+          String recordFieldName = recordFieldNameObj.toString();
+          NestedField tableField = lookupStructField(recordFieldName, schema, structFieldId);
+          if (tableField == null) {
+            // add the column if schema evolution is on, otherwise skip the value,
+            // skip the add column if we can't infer the type
+            if (schemaUpdateConsumer != null) {
+              Type type = SchemaUtils.inferIcebergType(recordFieldValue, config);
+              if (type != null) {
+                String parentFieldName =
+                    structFieldId < 0 ? null : tableSchema.findColumnName(structFieldId);
+                schemaUpdateConsumer.addColumn(parentFieldName, recordFieldName, type);
+              }
+            }
+          } else {
+            result.setField(
+                tableField.name(),
+                convertValue(
+                    recordFieldValue,
+                    tableField.type(),
+                    tableField.fieldId(),
+                    schemaUpdateConsumer));
+          }
+        });
+    return result;
+  }
+
+  private GenericRecord convertToStruct(
+      Struct struct,
+      StructType schema,
+      int structFieldId,
+      SchemaUpdate.Consumer schemaUpdateConsumer) {
+    GenericRecord result = GenericRecord.create(schema);
+    struct
+        .schema()
+        .fields()
+        .forEach(
+            recordField -> {
+              NestedField tableField = lookupStructField(recordField.name(), schema, structFieldId);
+              if (tableField == null) {
+                // add the column if schema evolution is on, otherwise skip the value
+                if (schemaUpdateConsumer != null) {
+                  String parentFieldName =
+                      structFieldId < 0 ? null : tableSchema.findColumnName(structFieldId);
+                  Type type = SchemaUtils.toIcebergType(recordField.schema(), config);
+                  schemaUpdateConsumer.addColumn(parentFieldName, recordField.name(), type);
+                }

Review Comment:
   CMIIW but I'm wondering if there is a bug here? When we enter this branch, although we update the table schema to add a new column, we don't seem to add the value of that new column to the `(GenericRecord) result`.
   
   I feel like this test _should_ pass but currently it does not: 
   ```java
     @Test
     public void testNewColumn() {
       final org.apache.iceberg.Schema table_schema = new org.apache.iceberg.Schema(
               NestedField.required(20, "i", IntegerType.get()));
       final Table table = mock(Table.class);
       when(table.schema()).thenReturn(table_schema);
   
       final Schema connect_schema =
               SchemaBuilder.struct()
                       .field("i", Schema.INT32_SCHEMA)
                       .field("l", Schema.INT32_SCHEMA);
       final Struct data = new Struct(connect_schema).put("i", 1).put("l", 2);
   
       final RecordConverter converter = new RecordConverter(table, config);
       final SchemaUpdate.Consumer consumer = new SchemaUpdate.Consumer();
       final Record record = converter.convert(data, consumer);
   
       // should add column "l"
       assertThat(consumer.empty()).isFalse();
   
       GenericRecord rec = (GenericRecord) record;
       assertThat(rec.getField("i")).isEqualTo(1);
       assertThat(rec.getField("l")).isEqualTo(2); // this assertion fails, 
expected=2, actual=null
     }
   ```



##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/IcebergWriter.java:
##########
@@ -77,8 +78,47 @@ public void write(SinkRecord record) {
   }
 
   private Record convertToRow(SinkRecord record) {
-    // FIXME: update this when the record converter is added
-    return null;
+    if (!config.evolveSchemaEnabled()) {
+      return recordConverter.convert(record.value());

Review Comment:
   It seems we only really sink the `value` portion of the records. 
   Is the idea that if users have valuable data in the `key`, they should combine this sink-connector with an appropriate SMT to move the data from the `key` into the `value`?
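   
   For illustration, here is a rough sketch (not part of this PR) of what such a key-to-value SMT could look like, assuming a Struct value and a record that has a non-null key with a key schema; the class name `KeyIntoValue` and the target field name `_key` are made up for the example:
   
   ```java
   import java.util.Map;
   import org.apache.kafka.common.config.ConfigDef;
   import org.apache.kafka.connect.connector.ConnectRecord;
   import org.apache.kafka.connect.data.Schema;
   import org.apache.kafka.connect.data.SchemaBuilder;
   import org.apache.kafka.connect.data.Struct;
   import org.apache.kafka.connect.transforms.Transformation;
   
   // Hypothetical SMT sketch: copies the record key into a "_key" field of the value.
   public class KeyIntoValue<R extends ConnectRecord<R>> implements Transformation<R> {
   
     private static final String KEY_FIELD = "_key"; // hypothetical target field name
   
     @Override
     public R apply(R record) {
       // only handle Struct values with a keyed, schema-bearing key; pass everything else through
       if (!(record.value() instanceof Struct) || record.key() == null || record.keySchema() == null) {
         return record;
       }
       Struct value = (Struct) record.value();
   
       // rebuild the value schema with one extra field for the key
       SchemaBuilder builder = SchemaBuilder.struct();
       value.schema().fields().forEach(f -> builder.field(f.name(), f.schema()));
       builder.field(KEY_FIELD, record.keySchema());
       Schema newSchema = builder.build();
   
       // copy the existing value fields, then append the key
       Struct newValue = new Struct(newSchema);
       value.schema().fields().forEach(f -> newValue.put(f.name(), value.get(f)));
       newValue.put(KEY_FIELD, record.key());
   
       return record.newRecord(
           record.topic(),
           record.kafkaPartition(),
           record.keySchema(),
           record.key(),
           newSchema,
           newValue,
           record.timestamp());
     }
   
     @Override
     public ConfigDef config() {
       return new ConfigDef();
     }
   
     @Override
     public void configure(Map<String, ?> configs) {}
   
     @Override
     public void close() {}
   }
   ```
   
   It could then be wired into the connector configuration through the standard `transforms` properties.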



##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/IcebergWriter.java:
##########
@@ -77,8 +78,47 @@ public void write(SinkRecord record) {
   }
 
   private Record convertToRow(SinkRecord record) {
-    // FIXME: update this when the record converter is added
-    return null;
+    if (!config.evolveSchemaEnabled()) {
+      return recordConverter.convert(record.value());
+    }
+
+    SchemaUpdate.Consumer updates = new SchemaUpdate.Consumer();
+    Record row = recordConverter.convert(record.value(), updates);
+
+    if (!updates.empty()) {
+      // complete the current file
+      flush();
+      // apply the schema updates, this will refresh the table
+      SchemaUtils.applySchemaUpdates(table, updates);
+      // initialize a new writer with the new schema
+      initNewWriter();
+      // convert the row again, this time using the new table schema
+      row = recordConverter.convert(record.value(), null);

Review Comment:
   ahhh I see now, you convert it again afterwards with the new schema, and presumably this time you won't hit that branch and will include the value in the resulting row ...
   
   Is the reason we need to do this twice because we basically don't know the new field's ID until the schema evolution has been executed?
   
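   A tiny sketch (not from the PR) illustrating that field-ID point, assuming an `org.apache.iceberg.Table` handle and a hypothetical column name: the new column's ID only exists once the schema update is committed, so a record converted against the old schema cannot carry the new field yet, and the second conversion against the refreshed schema picks it up.
   
   ```java
   import org.apache.iceberg.Table;
   import org.apache.iceberg.types.Types;
   
   class SchemaEvolutionIllustration {
   
     // Illustration only: the ID of a newly added column is assigned when the schema
     // update is committed, so a conversion done beforehand cannot reference it.
     static int addColumnAndGetId(Table table, String columnName) {
       table.updateSchema()
           .addColumn(columnName, Types.IntegerType.get()) // field ID assigned at commit
           .commit();
       return table.schema().findField(columnName).fieldId(); // resolvable only after commit
     }
   }
   ```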



##########
kafka-connect/kafka-connect/src/main/java/org/apache/iceberg/connect/data/BaseDeltaTaskWriter.java:
##########
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.connect.data;
+
+import java.io.IOException;
+import java.util.Set;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.PartitionKey;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.data.InternalRecordWrapper;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.io.BaseTaskWriter;
+import org.apache.iceberg.io.FileAppenderFactory;
+import org.apache.iceberg.io.FileIO;
+import org.apache.iceberg.io.OutputFileFactory;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.types.TypeUtil;
+
+abstract class BaseDeltaTaskWriter extends BaseTaskWriter<Record> {
+
+  private final Schema schema;
+  private final Schema deleteSchema;
+  private final InternalRecordWrapper wrapper;
+  private final InternalRecordWrapper keyWrapper;
+  private final RecordProjection keyProjection;
+  private final boolean upsertMode;
+
+  BaseDeltaTaskWriter(
+      PartitionSpec spec,
+      FileFormat format,
+      FileAppenderFactory<Record> appenderFactory,
+      OutputFileFactory fileFactory,
+      FileIO io,
+      long targetFileSize,
+      Schema schema,
+      Set<Integer> identifierFieldIds,
+      boolean upsertMode) {
+    super(spec, format, appenderFactory, fileFactory, io, targetFileSize);
+    this.schema = schema;
+    this.deleteSchema = TypeUtil.select(schema, Sets.newHashSet(identifierFieldIds));
+    this.wrapper = new InternalRecordWrapper(schema.asStruct());
+    this.keyWrapper = new InternalRecordWrapper(deleteSchema.asStruct());
+    this.keyProjection = RecordProjection.create(schema, deleteSchema);
+    this.upsertMode = upsertMode;
+  }
+
+  abstract RowDataDeltaWriter route(Record row);
+
+  InternalRecordWrapper wrapper() {
+    return wrapper;
+  }
+
+  @Override
+  public void write(Record row) throws IOException {
+    Operation op =
+        row instanceof RecordWrapper
+            ? ((RecordWrapper) row).op()
+            : upsertMode ? Operation.UPDATE : Operation.INSERT;
+    RowDataDeltaWriter writer = route(row);
+    if (op == Operation.UPDATE || op == Operation.DELETE) {
+      writer.deleteKey(keyProjection.wrap(row));
+    }
+    if (op == Operation.UPDATE || op == Operation.INSERT) {
+      writer.write(row);
+    }
+  }
+
+  class RowDataDeltaWriter extends BaseEqualityDeltaWriter {
+
+    RowDataDeltaWriter(PartitionKey partition) {
+      super(partition, schema, deleteSchema);
+    }
+
+    @Override
+    protected StructLike asStructLike(Record data) {
+      return wrapper.wrap(data);

Review Comment:
   q: why do we need to wrap these data records? 
   
   I'm able to replace this line with this: 
   ```suggestion
         return data;
   ```
   and still pass all the tests. 
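   
   One possible answer, if I understand `InternalRecordWrapper` correctly: it converts date/time values from their generic Java representation into Iceberg's internal representation when the struct is read, which matters for comparisons on timestamp/date key columns; the existing tests may simply not exercise such columns. A standalone sketch (not from the PR) of the difference, assuming a single timestamptz column:
   
   ```java
   import java.time.OffsetDateTime;
   import org.apache.iceberg.Schema;
   import org.apache.iceberg.data.GenericRecord;
   import org.apache.iceberg.data.InternalRecordWrapper;
   import org.apache.iceberg.types.Types;
   
   public class WrapperIllustration {
     public static void main(String[] args) {
       Schema schema =
           new Schema(Types.NestedField.required(1, "ts", Types.TimestampType.withZone()));
   
       GenericRecord record = GenericRecord.create(schema);
       record.setField("ts", OffsetDateTime.parse("2024-01-01T00:00:00Z"));
   
       InternalRecordWrapper wrapper = new InternalRecordWrapper(schema.asStruct());
   
       // the unwrapped record returns the OffsetDateTime as stored ...
       System.out.println(record.get(0, Object.class).getClass());
       // ... while the wrapper exposes the internal representation on access,
       // which is what equality comparisons work against
       System.out.println(wrapper.wrap(record).get(0, Object.class).getClass());
     }
   }
   ```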



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

