liurenjie1024 commented on code in PR #275:
URL: https://github.com/apache/iceberg-rust/pull/275#discussion_r1529603925
##########
crates/iceberg/src/writer/mod.rs:
##########
@@ -15,14 +15,65 @@
 // specific language governing permissions and limitations
 // under the License.
 
-//! The iceberg writer module.
-
-use crate::spec::DataFileBuilder;
+//! Iceberg writer module.
+//!
+//! The writer API is designed to be extensible and flexible. Each writer is decoupled and can be created and configured independently. Users can:
+//! 1. Customize the writer using the writer trait.
+//! 2. Combine different writers to build a writer with complex write logic.
+//!
+//! There are two kinds of writers:
+//! 1. FileWriter: Focuses on writing record batches to different physical file formats (such as Parquet and ORC).
+//! 2. IcebergWriter: Focuses on the logical format of an Iceberg table. It writes the data using a FileWriter underneath.
+//!
+//! # Simple example for the data file writer:
+//! ```ignore
+//! // Create a parquet file writer builder. The parameters can be obtained from the table.
+//! let file_writer_builder = ParquetWriterBuilder::new(
+//!     0,
+//!     WriterProperties::builder().build(),
+//!     schema,
+//!     file_io.clone(),
+//!     location_gen,
+//!     file_name_gen,
+//! );
+//! // Create a data file writer builder using the parquet file writer builder.
+//! let data_file_writer_builder = DataFileWriterBuilder::new(file_writer_builder);
+//! // Build the data file writer.
+//! let mut data_file_writer = data_file_writer_builder.build().await.unwrap();
+//!
+//! data_file_writer.write(record_batch).await.unwrap();
+//! let data_files = data_file_writer.flush().await.unwrap();
+//! ```
+pub mod base_writer;
 pub mod file_writer;
 
+use crate::{spec::DataFileBuilder, Result};
+use arrow_array::RecordBatch;
+
+type DefaultInput = RecordBatch;
 type DefaultOutput = Vec<DataFileBuilder>;
 
+/// The builder for iceberg writer.
+#[allow(async_fn_in_trait)]

Review Comment:
   Why do we need this?
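   For context on the lint (this snippet is illustrative, not from the PR): an `async fn` in a public trait desugars to a method returning an anonymous `impl Future` that callers cannot bound with `Send`, which is what the warn-by-default `async_fn_in_trait` lint flags; the `#[allow]` presumably silences that warning.

   ```rust
   // A minimal sketch of what the lint guards against; the trait name is
   // hypothetical. Desugared, `run` returns an opaque future with no `Send`
   // bound, so generic code cannot require the future to be `Send`.
   pub trait Example {
       async fn run(&self) -> u64; // warn-by-default lint: `async_fn_in_trait`
   }
   ```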
##########
crates/iceberg/src/writer/mod.rs:
##########
@@ -15,14 +15,65 @@
+/// The builder for iceberg writer.
+#[allow(async_fn_in_trait)]
+pub trait IcebergWriterBuilder<I = DefaultInput, O = DefaultOutput>:
+    Send + Clone + 'static
+{
+    /// The associated writer type.
+    type R: IcebergWriter<I, O>;
+    /// Build the iceberg writer.
+    async fn build(self) -> Result<Self::R>;

Review Comment:
   Should we follow the pattern in `FileWriterBuilder` and return `impl Future`?
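   A sketch of the pattern the comment refers to, assuming `FileWriterBuilder` spells out its return type the same way (the exact bounds here are illustrative):

   ```rust
   use std::future::Future;

   /// Variant of the builder trait with the desugared return type: callers can
   /// now see (and rely on) the `Send` bound of the returned future, which an
   /// `async fn` in a trait cannot express.
   pub trait IcebergWriterBuilder<I = DefaultInput, O = DefaultOutput>:
       Send + Clone + 'static
   {
       /// The associated writer type.
       type R: IcebergWriter<I, O>;
       /// Build the iceberg writer.
       fn build(self) -> impl Future<Output = Result<Self::R>> + Send;
   }
   ```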
##########
crates/iceberg/src/writer/base_writer/data_file_writer.rs:
##########
@@ -0,0 +1,310 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! This module provides `DataFileWriter`.
+
+use crate::spec::{DataContentType, DataFileBuilder};
+use crate::writer::file_writer::FileWriter;
+use crate::writer::CurrentFileStatus;
+use crate::writer::{file_writer::FileWriterBuilder, IcebergWriter, IcebergWriterBuilder};
+use crate::Result;
+use arrow_array::RecordBatch;
+use itertools::Itertools;
+
+/// Builder for `DataFileWriter`.
+#[derive(Clone)]
+pub struct DataFileWriterBuilder<B: FileWriterBuilder> {
+    inner: B,
+}
+
+impl<B: FileWriterBuilder> DataFileWriterBuilder<B> {
+    /// Create a new `DataFileWriterBuilder` using a `FileWriterBuilder`.
+    pub fn new(inner: B) -> Self {
+        Self { inner }
+    }
+}
+
+#[allow(async_fn_in_trait)]
+impl<B: FileWriterBuilder> IcebergWriterBuilder for DataFileWriterBuilder<B> {
+    type R = DataFileWriter<B>;
+
+    async fn build(self) -> Result<Self::R> {
+        Ok(DataFileWriter {
+            inner_writer: self.inner.clone().build().await?,
+            builder: self.inner,
+        })
+    }
+}
+
+/// A writer that writes data within one spec/partition.
+pub struct DataFileWriter<B: FileWriterBuilder> {
+    builder: B,
+    inner_writer: B::R,
+}
+
+#[async_trait::async_trait]
+impl<B: FileWriterBuilder> IcebergWriter for DataFileWriter<B> {
+    async fn write(&mut self, batch: RecordBatch) -> Result<()> {
+        self.inner_writer.write(&batch).await
+    }
+
+    async fn flush(&mut self) -> Result<Vec<DataFileBuilder>> {
+        let writer =
+            std::mem::replace(&mut self.inner_writer, self.builder.clone().build().await?);
+        let res = writer
+            .close()
+            .await?
+            .into_iter()
+            .map(|mut res| {
+                res.content(DataContentType::Data);
+                res
+            })
+            .collect_vec();
+        Ok(res)
+    }
+}
+
+impl<B: FileWriterBuilder> CurrentFileStatus for DataFileWriter<B> {
+    fn current_file_path(&self) -> String {
+        self.inner_writer.current_file_path()
+    }
+
+    fn current_row_num(&self) -> usize {
+        self.inner_writer.current_row_num()
+    }
+
+    fn current_written_size(&self) -> usize {
+        self.inner_writer.current_written_size()
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::{collections::HashMap, sync::Arc};
+
+    use arrow_array::{types::Int64Type, ArrayRef, Int64Array, RecordBatch, StructArray};
+    use parquet::{arrow::PARQUET_FIELD_ID_META_KEY, file::properties::WriterProperties};
+    use tempfile::TempDir;
+
+    use crate::{
+        io::FileIOBuilder,
+        spec::{DataFileFormat, Struct},
+        writer::{
+            base_writer::data_file_writer::DataFileWriterBuilder,
+            file_writer::{
+                location_generator::{test::MockLocationGenerator, DefaultFileNameGenerator},
+                ParquetWriterBuilder,
+            },
+            tests::check_parquet_data_file,
+            IcebergWriter, IcebergWriterBuilder,
+        },
+    };
+
+    #[tokio::test]
+    async fn test_data_file_writer() -> Result<(), anyhow::Error> {
+        let temp_dir = TempDir::new().unwrap();
+        let file_io = FileIOBuilder::new_fs_io().build().unwrap();
+        let location_gen =
+            MockLocationGenerator::new(temp_dir.path().to_str().unwrap().to_string());
+        let file_name_gen =
+            DefaultFileNameGenerator::new("test".to_string(), None, DataFileFormat::Parquet);
+
+        // prepare data
+        // Int, Struct(Int), String, List(Int), Struct(Struct(Int))
+        let schema = {
+            let fields = vec![
+                arrow_schema::Field::new("col0", arrow_schema::DataType::Int64, true)
+                    .with_metadata(HashMap::from([(
+                        PARQUET_FIELD_ID_META_KEY.to_string(),
+                        "0".to_string(),
+                    )])),
+                arrow_schema::Field::new(
+                    "col1",
+                    arrow_schema::DataType::Struct(
+                        vec![arrow_schema::Field::new(
+                            "sub_col",
+                            arrow_schema::DataType::Int64,
+                            true,
+                        )
+                        .with_metadata(HashMap::from([(
+                            PARQUET_FIELD_ID_META_KEY.to_string(),
+                            "-1".to_string(),
+                        )]))]
+                        .into(),
+                    ),
+                    true,
+                )
+                .with_metadata(HashMap::from([(
+                    PARQUET_FIELD_ID_META_KEY.to_string(),
+                    "1".to_string(),
+                )])),
+                arrow_schema::Field::new("col2", arrow_schema::DataType::Utf8, true).with_metadata(
+                    HashMap::from([(PARQUET_FIELD_ID_META_KEY.to_string(), "2".to_string())]),
+                ),
+                arrow_schema::Field::new(
+                    "col3",
+                    arrow_schema::DataType::List(Arc::new(
+                        arrow_schema::Field::new("item", arrow_schema::DataType::Int64, true)
+                            .with_metadata(HashMap::from([(
+                                PARQUET_FIELD_ID_META_KEY.to_string(),
+                                "-1".to_string(),

Review Comment:
   Why is the field id "-1"?
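   For reference on the mechanism the question touches (illustrative snippet, not from the PR): at the Arrow level the Parquet field id is plain string metadata on the field under `PARQUET_FIELD_ID_META_KEY`, so a `-1` is carried through like any other value:

   ```rust
   use std::collections::HashMap;

   use arrow_schema::{DataType, Field};
   use parquet::arrow::PARQUET_FIELD_ID_META_KEY;

   // Attach a Parquet field id to an Arrow field via metadata; the id is a
   // free-form string as far as the Arrow schema is concerned, so "-1"
   // round-trips like any other id.
   fn field_with_id(name: &str, id: i32) -> Field {
       Field::new(name, DataType::Int64, true).with_metadata(HashMap::from([(
           PARQUET_FIELD_ID_META_KEY.to_string(),
           id.to_string(),
       )]))
   }
   ```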
##########
crates/iceberg/src/writer/base_writer/data_file_writer.rs:
##########
@@ -0,0 +1,310 @@
+#[async_trait::async_trait]
+impl<B: FileWriterBuilder> IcebergWriter for DataFileWriter<B> {
+    async fn write(&mut self, batch: RecordBatch) -> Result<()> {
+        self.inner_writer.write(&batch).await
+    }
+
+    async fn flush(&mut self) -> Result<Vec<DataFileBuilder>> {

Review Comment:
   Instead of returning a builder, could we return `DataFile` by storing the necessary fields in the writer?
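   A sketch of what that might look like, assuming `DataFileBuilder` is a `derive_builder`-style type whose `build()` completes into a `DataFile`; the `partition_value` field and the builder-error conversion are assumptions, not part of the PR:

   ```rust
   use crate::spec::{DataContentType, DataFile, Struct};
   use crate::writer::file_writer::{FileWriter, FileWriterBuilder};
   use crate::Result;

   /// Hypothetical variant: the writer stores the fields the builder still
   /// needs, so `flush` can finish each builder and return `DataFile`s.
   pub struct DataFileWriter<B: FileWriterBuilder> {
       builder: B,
       inner_writer: B::R,
       partition_value: Struct, // assumed to be known per spec/partition
   }

   impl<B: FileWriterBuilder> DataFileWriter<B> {
       async fn flush(&mut self) -> Result<Vec<DataFile>> {
           let writer =
               std::mem::replace(&mut self.inner_writer, self.builder.clone().build().await?);
           writer
               .close()
               .await?
               .into_iter()
               .map(|mut b| {
                   b.content(DataContentType::Data);
                   b.partition(self.partition_value.clone());
                   // Assumes the builder's error type converts into the crate error.
                   b.build().map_err(Into::into)
               })
               .collect()
       }
   }
   ```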