ZENOTME commented on code in PR #703:
URL: https://github.com/apache/iceberg-rust/pull/703#discussion_r1856217345


##########
crates/iceberg/src/writer/base_writer/equality_delete_writer.rs:
##########
@@ -0,0 +1,538 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! This module provides `EqualityDeleteWriter`.
+
+use std::sync::Arc;
+
+use arrow_array::{ArrayRef, RecordBatch, StructArray};
+use arrow_schema::{DataType, FieldRef, Fields, Schema, SchemaRef};
+use itertools::Itertools;
+use parquet::arrow::PARQUET_FIELD_ID_META_KEY;
+
+use crate::spec::{DataFile, Struct};
+use crate::writer::file_writer::{FileWriter, FileWriterBuilder};
+use crate::writer::{IcebergWriter, IcebergWriterBuilder};
+use crate::{Error, ErrorKind, Result};
+
+/// Builder for `EqualityDeleteWriter`.
+#[derive(Clone)]
+pub struct EqualityDeleteFileWriterBuilder<B: FileWriterBuilder> {
+    inner: B,
+}
+
+impl<B: FileWriterBuilder> EqualityDeleteFileWriterBuilder<B> {
+    /// Create a new `EqualityDeleteFileWriterBuilder` using a 
`FileWriterBuilder`.
+    pub fn new(inner: B) -> Self {
+        Self { inner }
+    }
+}
+
+/// Config for `EqualityDeleteWriter`.
+pub struct EqualityDeleteWriterConfig {
+    // Field ids used to determine row equality in equality delete files.
+    equality_ids: Vec<usize>,
+    // Projector used to project the data chunk into specific fields.
+    projector: ArrowFieldProjector,
+    partition_value: Struct,
+}
+
+impl EqualityDeleteWriterConfig {
+    /// Create a new `DataFileWriterConfig` with equality ids.
+    pub fn new(
+        equality_ids: Vec<usize>,
+        projector: ArrowFieldProjector,
+        partition_value: Option<Struct>,
+    ) -> Self {
+        Self {
+            equality_ids,
+            projector,
+            partition_value: partition_value.unwrap_or(Struct::empty()),
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl<B: FileWriterBuilder> IcebergWriterBuilder for 
EqualityDeleteFileWriterBuilder<B> {
+    type R = EqualityDeleteFileWriter<B>;
+    type C = EqualityDeleteWriterConfig;
+
+    async fn build(self, config: Self::C) -> Result<Self::R> {
+        Ok(EqualityDeleteFileWriter {
+            inner_writer: Some(self.inner.clone().build().await?),
+            projector: config.projector,
+            equality_ids: config.equality_ids,
+            partition_value: config.partition_value,
+        })
+    }
+}
+
+/// Writer used to write equality delete files.
+pub struct EqualityDeleteFileWriter<B: FileWriterBuilder> {
+    inner_writer: Option<B::R>,
+    projector: ArrowFieldProjector,
+    equality_ids: Vec<usize>,
+    partition_value: Struct,
+}
+
+#[async_trait::async_trait]
+impl<B: FileWriterBuilder> IcebergWriter for EqualityDeleteFileWriter<B> {
+    async fn write(&mut self, batch: RecordBatch) -> Result<()> {
+        let batch = self.projector.project_bacth(batch)?;
+        if let Some(writer) = self.inner_writer.as_mut() {
+            writer.write(&batch).await
+        } else {
+            Err(Error::new(
+                ErrorKind::Unexpected,
+                "Equality delete inner writer does not exist",
+            ))
+        }
+    }
+
+    async fn close(&mut self) -> Result<Vec<DataFile>> {
+        if let Some(writer) = self.inner_writer.take() {
+            Ok(writer
+                .close()
+                .await?
+                .into_iter()
+                .map(|mut res| {
+                    res.content(crate::spec::DataContentType::EqualityDeletes);
+                    res.equality_ids(self.equality_ids.iter().map(|id| *id as 
i32).collect_vec());
+                    res.partition(self.partition_value.clone());
+                    res.build().expect("msg")
+                })
+                .collect_vec())
+        } else {
+            Err(Error::new(
+                ErrorKind::Unexpected,
+                "Equality delete inner writer does not exist",
+            ))
+        }
+    }
+}
+
+/// Help to project specific field from `RecordBatch`` according to the fields 
id.
+#[derive(Clone)]
+pub struct ArrowFieldProjector {
+    // A vector of vectors, where each inner vector represents the index path 
to access a specific field in a nested structure.
+    // E.g. [[0], [1, 2]] means the first field is accessed directly from the 
first column,
+    // while the second field is accessed from the second column and then from 
its third subcolumn (second column must be a struct column).
+    field_indices: Vec<Vec<usize>>,
+    // The schema reference after projection. This schema is derived from the 
original schema based on the given field IDs.
+    projected_schema: SchemaRef,
+}
+
impl ArrowFieldProjector {
    /// Create an `ArrowFieldProjector` that projects the columns identified
    /// by `field_ids` out of `original_schema`. Ids are matched against the
    /// Arrow field metadata stored under `PARQUET_FIELD_ID_META_KEY`.
    ///
    /// # Errors
    /// Returns `ErrorKind::DataInvalid` if an id cannot be located in the
    /// schema or the column's data type is invalid for projection.
    pub fn new(original_schema: SchemaRef, field_ids: &[usize]) -> Result<Self> {
        // Renamed from the original misspelled `field_indexs`.
        let mut field_indices = Vec::with_capacity(field_ids.len());
        let mut fields = Vec::with_capacity(field_ids.len());
        for &id in field_ids {
            let mut index_path = vec![];
            // `map_err` + `?` replaces the original `if let Ok(..) else
            // return Err(..)` shape; the constructed error is unchanged.
            let field = Self::fetch_field_index(
                original_schema.fields(),
                &mut index_path,
                id as i64,
                PARQUET_FIELD_ID_META_KEY,
            )
            .map_err(|_| {
                Error::new(
                    ErrorKind::DataInvalid,
                    format!("Can't find source column id or column data type invalid: {}", id),
                )
            })?;
            fields.push(field.clone());
            field_indices.push(index_path);
        }
        let projected_schema = Arc::new(Schema::new(fields));
        Ok(Self {
            field_indices,
            projected_schema,
        })
    }
+
+    fn fetch_field_index(

Review Comment:
   This requires a new `visit_` implementation; e.g. we need to stop the
visitor from following fields once we reach the target field, or else we need
something like a stop flag to indicate that the field has already been
fetched. So I doubt this will really simplify the code.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org

Reply via email to