liurenjie1024 commented on code in PR #703:
URL: https://github.com/apache/iceberg-rust/pull/703#discussion_r1858032287


##########
crates/iceberg/src/arrow/record_batch_projector.rs:
##########
@@ -0,0 +1,261 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::sync::Arc;
+
+use arrow_array::{ArrayRef, RecordBatch, StructArray};
+use arrow_schema::{DataType, Field, FieldRef, Fields, Schema, SchemaRef};
+
+use crate::error::Result;
+use crate::{Error, ErrorKind};
+
+/// Help to project specific field from `RecordBatch`` according to the fields 
id of meta of field.
+#[derive(Clone)]
+pub struct RecordBatchProjector {

Review Comment:
   We should make this `pub(crate)`; otherwise it would be easy to leak this
type by mistake when exporting the module.



##########
crates/iceberg/src/arrow/record_batch_projector.rs:
##########
@@ -0,0 +1,261 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::sync::Arc;
+
+use arrow_array::{ArrayRef, RecordBatch, StructArray};
+use arrow_schema::{DataType, Field, FieldRef, Fields, Schema, SchemaRef};
+
+use crate::error::Result;
+use crate::{Error, ErrorKind};
+
+/// Help to project specific field from `RecordBatch`` according to the fields 
id of meta of field.
+#[derive(Clone)]
+pub struct RecordBatchProjector {
+    // A vector of vectors, where each inner vector represents the index path 
to access a specific field in a nested structure.
+    // E.g. [[0], [1, 2]] means the first field is accessed directly from the 
first column,
+    // while the second field is accessed from the second column and then from 
its third subcolumn (second column must be a struct column).
+    field_indices: Vec<Vec<usize>>,
+    // The schema reference after projection. This schema is derived from the 
original schema based on the given field IDs.
+    projected_schema: SchemaRef,
+}
+
+impl RecordBatchProjector {
+    /// Init ArrowFieldProjector
+    pub fn new<F>(
+        original_schema: SchemaRef,
+        field_ids: &[i32],
+        field_id_fetch_func: F,
+    ) -> Result<Self>
+    where
+        F: Fn(&Field) -> Option<i64>,
+    {
+        let mut field_indices = Vec::with_capacity(field_ids.len());
+        let mut fields = Vec::with_capacity(field_ids.len());
+        for &id in field_ids {
+            let mut field_index = vec![];
+            if let Ok(field) = Self::fetch_field_index(
+                original_schema.fields(),
+                &mut field_index,
+                id as i64,
+                &field_id_fetch_func,
+            ) {
+                fields.push(field.clone());
+                field_indices.push(field_index);
+            } else {
+                return Err(Error::new(
+                    ErrorKind::DataInvalid,
+                    format!(
+                        "Can't find source column id or column data type 
invalid: {}",
+                        id
+                    ),
+                ));
+            }

Review Comment:
   ```suggestion
   Self::fetch_field_index(
                   original_schema.fields(),
                   &mut field_index,
                   id as i64,
                   &field_id_fetch_func,
               ) ?;
               fields.push(field.clone());
                   field_indices.push(field_index);
    
   ```



##########
crates/iceberg/src/writer/base_writer/equality_delete_writer.rs:
##########
@@ -0,0 +1,421 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! This module provide `EqualityDeleteWriter`.
+
+use arrow_array::RecordBatch;
+use arrow_schema::SchemaRef;
+use itertools::Itertools;
+use parquet::arrow::PARQUET_FIELD_ID_META_KEY;
+
+use crate::arrow::record_batch_projector::RecordBatchProjector;
+use crate::spec::{DataFile, Struct};
+use crate::writer::file_writer::{FileWriter, FileWriterBuilder};
+use crate::writer::{IcebergWriter, IcebergWriterBuilder};
+use crate::{Error, ErrorKind, Result};
+
+/// Builder for `EqualityDeleteWriter`.
+#[derive(Clone)]
+pub struct EqualityDeleteFileWriterBuilder<B: FileWriterBuilder> {
+    inner: B,
+}
+
+impl<B: FileWriterBuilder> EqualityDeleteFileWriterBuilder<B> {
+    /// Create a new `EqualityDeleteFileWriterBuilder` using a 
`FileWriterBuilder`.
+    pub fn new(inner: B) -> Self {
+        Self { inner }
+    }
+}
+
+/// Config for `EqualityDeleteWriter`.
+pub struct EqualityDeleteWriterConfig {
+    // Field ids used to determine row equality in equality delete files.
+    equality_ids: Vec<i32>,
+    // Projector used to project the data chunk into specific fields.
+    projector: RecordBatchProjector,
+    partition_value: Struct,
+}
+
+impl EqualityDeleteWriterConfig {
+    /// Create a new `DataFileWriterConfig` with equality ids.
+    pub fn new(
+        equality_ids: Vec<i32>,
+        original_schema: SchemaRef,

Review Comment:
   I still don't understand why we ask the user to pass an arrow schema here. I
think the original_schema is the table schema, right? It would be more
convenient for the user to pass an iceberg schema.



##########
crates/iceberg/src/arrow/record_batch_projector.rs:
##########
@@ -0,0 +1,261 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::sync::Arc;
+
+use arrow_array::{ArrayRef, RecordBatch, StructArray};
+use arrow_schema::{DataType, Field, FieldRef, Fields, Schema, SchemaRef};
+
+use crate::error::Result;
+use crate::{Error, ErrorKind};
+
+/// Help to project specific field from `RecordBatch`` according to the fields 
id of meta of field.
+#[derive(Clone)]
+pub struct RecordBatchProjector {
+    // A vector of vectors, where each inner vector represents the index path 
to access a specific field in a nested structure.
+    // E.g. [[0], [1, 2]] means the first field is accessed directly from the 
first column,
+    // while the second field is accessed from the second column and then from 
its third subcolumn (second column must be a struct column).
+    field_indices: Vec<Vec<usize>>,
+    // The schema reference after projection. This schema is derived from the 
original schema based on the given field IDs.
+    projected_schema: SchemaRef,
+}
+
+impl RecordBatchProjector {
+    /// Init ArrowFieldProjector
+    pub fn new<F>(
+        original_schema: SchemaRef,
+        field_ids: &[i32],
+        field_id_fetch_func: F,
+    ) -> Result<Self>
+    where
+        F: Fn(&Field) -> Option<i64>,
+    {
+        let mut field_indices = Vec::with_capacity(field_ids.len());
+        let mut fields = Vec::with_capacity(field_ids.len());
+        for &id in field_ids {
+            let mut field_index = vec![];
+            if let Ok(field) = Self::fetch_field_index(
+                original_schema.fields(),
+                &mut field_index,
+                id as i64,
+                &field_id_fetch_func,
+            ) {
+                fields.push(field.clone());
+                field_indices.push(field_index);
+            } else {
+                return Err(Error::new(
+                    ErrorKind::DataInvalid,
+                    format!(
+                        "Can't find source column id or column data type 
invalid: {}",
+                        id
+                    ),
+                ));
+            }
+        }
+        let delete_arrow_schema = Arc::new(Schema::new(fields));
+        Ok(Self {
+            field_indices,
+            projected_schema: delete_arrow_schema,
+        })
+    }
+
+    fn fetch_field_index<F>(
+        fields: &Fields,
+        index_vec: &mut Vec<usize>,
+        target_field_id: i64,
+        field_id_fetch_func: &F,
+    ) -> Result<FieldRef>
+    where
+        F: Fn(&Field) -> Option<i64>,
+    {
+        for (pos, field) in fields.iter().enumerate() {
+            match field.data_type() {
+                DataType::Float16 | DataType::Float32 | DataType::Float64 => {
+                    return Err(Error::new(
+                        ErrorKind::DataInvalid,
+                        "Delete column data type cannot be float or double",
+                    ));
+                }

Review Comment:
   Why does this check happen here? It means that if any field is a float then
we throw an error, even when it's not a deletion field. Also, as a record batch
projector, we should not do this check since it is Iceberg-specific logic.



##########
crates/iceberg/src/arrow/record_batch_projector.rs:
##########
@@ -0,0 +1,261 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::sync::Arc;
+
+use arrow_array::{ArrayRef, RecordBatch, StructArray};
+use arrow_schema::{DataType, Field, FieldRef, Fields, Schema, SchemaRef};
+
+use crate::error::Result;
+use crate::{Error, ErrorKind};
+
+/// Help to project specific field from `RecordBatch`` according to the fields 
id of meta of field.
+#[derive(Clone)]
+pub struct RecordBatchProjector {
+    // A vector of vectors, where each inner vector represents the index path 
to access a specific field in a nested structure.
+    // E.g. [[0], [1, 2]] means the first field is accessed directly from the 
first column,
+    // while the second field is accessed from the second column and then from 
its third subcolumn (second column must be a struct column).
+    field_indices: Vec<Vec<usize>>,
+    // The schema reference after projection. This schema is derived from the 
original schema based on the given field IDs.
+    projected_schema: SchemaRef,
+}
+
+impl RecordBatchProjector {
+    /// Init ArrowFieldProjector
+    pub fn new<F>(
+        original_schema: SchemaRef,
+        field_ids: &[i32],
+        field_id_fetch_func: F,
+    ) -> Result<Self>
+    where
+        F: Fn(&Field) -> Option<i64>,

Review Comment:
   ```suggestion
           F: Fn(&Field) -> Result<i64>,
   ```
   The return type here should be a `Result`, as it's possible for an error to
happen.
   



##########
crates/iceberg/src/writer/base_writer/equality_delete_writer.rs:
##########
@@ -0,0 +1,421 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! This module provide `EqualityDeleteWriter`.
+
+use arrow_array::RecordBatch;
+use arrow_schema::SchemaRef;
+use itertools::Itertools;
+use parquet::arrow::PARQUET_FIELD_ID_META_KEY;
+
+use crate::arrow::record_batch_projector::RecordBatchProjector;
+use crate::spec::{DataFile, Struct};
+use crate::writer::file_writer::{FileWriter, FileWriterBuilder};
+use crate::writer::{IcebergWriter, IcebergWriterBuilder};
+use crate::{Error, ErrorKind, Result};
+
+/// Builder for `EqualityDeleteWriter`.
+#[derive(Clone)]
+pub struct EqualityDeleteFileWriterBuilder<B: FileWriterBuilder> {
+    inner: B,
+}
+
+impl<B: FileWriterBuilder> EqualityDeleteFileWriterBuilder<B> {
+    /// Create a new `EqualityDeleteFileWriterBuilder` using a 
`FileWriterBuilder`.
+    pub fn new(inner: B) -> Self {
+        Self { inner }
+    }
+}
+
+/// Config for `EqualityDeleteWriter`.
+pub struct EqualityDeleteWriterConfig {
+    // Field ids used to determine row equality in equality delete files.
+    equality_ids: Vec<i32>,
+    // Projector used to project the data chunk into specific fields.
+    projector: RecordBatchProjector,
+    partition_value: Struct,
+}
+
+impl EqualityDeleteWriterConfig {
+    /// Create a new `DataFileWriterConfig` with equality ids.
+    pub fn new(
+        equality_ids: Vec<i32>,
+        original_schema: SchemaRef,
+        partition_value: Option<Struct>,
+    ) -> Result<Self> {
+        let projector = RecordBatchProjector::new(original_schema, 
&equality_ids, |field| {
+            field
+                .metadata()
+                .get(PARQUET_FIELD_ID_META_KEY)
+                .and_then(|value| value.parse::<i64>().ok())
+        })?;
+        Ok(Self {
+            equality_ids,
+            projector,
+            partition_value: partition_value.unwrap_or(Struct::empty()),
+        })
+    }
+
+    /// Return projected Schema
+    pub fn projected_schema_ref(&self) -> &SchemaRef {

Review Comment:
   Should this be crate private?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org

Reply via email to