liurenjie1024 commented on code in PR #176: URL: https://github.com/apache/iceberg-rust/pull/176#discussion_r1473864622
########## crates/iceberg/src/error.rs: ##########

@@ -44,6 +44,9 @@ pub enum ErrorKind {
     ///
     /// This error is returned when given iceberg feature is not supported.
     FeatureUnsupported,
+    /// Iceberg meets an unexpected parameter. Please refer to the related doc or error message to
+    /// check if the parameter is valid.
+    UnexpectedParameter,

Review Comment:
I think this is similar to `Unexpected`?
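For illustration, a minimal sketch of what reusing the existing kind would look like at one of this PR's call sites; the `iceberg::Error` / `iceberg::ErrorKind` root re-exports are assumed, and the message is the one suggested further down in this review:

```rust
// Sketch only: the proposed `UnexpectedParameter` variant collapsed into the
// existing `Unexpected` kind, so no new ErrorKind is needed.
use iceberg::{Error, ErrorKind};

fn missing_field_id_error() -> Error {
    Error::new(
        ErrorKind::Unexpected,
        "Field id not found in arrow schema metadata.",
    )
}
```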
########## crates/iceberg/src/writer/file_writer/parquet_writer.rs: ##########

@@ -0,0 +1,390 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! This module contains the file writer for the parquet file format.
+
+use std::{
+    collections::HashMap,
+    sync::{atomic::AtomicI64, Arc},
+};
+
+use crate::Result;
+use crate::{
+    io::{FileIO, OutputFile},
+    spec::{DataFileBuilder, DataFileFormat},
+    writer::CurrentFileStatus,
+    Error,
+};
+use arrow_schema::SchemaRef;
+use parquet::{arrow::AsyncArrowWriter, format::FileMetaData};
+use parquet::{arrow::PARQUET_FIELD_ID_META_KEY, file::properties::WriterProperties};
+
+use super::{
+    location_generator::LocationGenerator, track_writer::TrackWriter, FileWriter, FileWriterBuilder,
+};
+
+/// ParquetWriterBuilder is used to build a [`ParquetWriter`].
+#[derive(Clone)]
+pub struct ParquetWriterBuilder {
+    /// `buffer_size` determines the initial size of the intermediate buffer.
+    /// The intermediate buffer will automatically be resized if necessary.
+    init_buffer_size: usize,
+    props: WriterProperties,
+    schema: SchemaRef,
+
+    file_io: FileIO,
+    location_generator: Arc<dyn LocationGenerator>,
+}
+
+impl ParquetWriterBuilder {
+    /// To avoid the EntityTooSmall error, we set the minimum buffer size to 8MB if the given buffer size is smaller than it.
+    const MIN_BUFFER_SIZE: usize = 8 * 1024 * 1024;
+
+    /// Create a new `ParquetWriterBuilder`.
+    /// To construct the write result, the schema should contain the `PARQUET_FIELD_ID_META_KEY` metadata for each field.
+    pub fn new(
+        init_buffer_size: usize,
+        props: WriterProperties,
+        schema: SchemaRef,
+        file_io: FileIO,
+        location_generator: Arc<dyn LocationGenerator>,
+    ) -> Self {
+        Self {
+            init_buffer_size,
+            props,
+            schema,
+            file_io,
+            location_generator,
+        }
+    }
+}
+
+impl FileWriterBuilder for ParquetWriterBuilder {
+    type R = ParquetWriter;
+
+    async fn build(self) -> crate::Result<Self::R> {
+        // Fetch field id from schema
+        let field_ids = self
+            .schema
+            .fields()
+            .iter()
+            .map(|field| {
+                field
+                    .metadata()
+                    .get(PARQUET_FIELD_ID_META_KEY)
+                    .ok_or_else(|| {
+                        Error::new(
+                            crate::ErrorKind::UnexpectedParameter,
+                            "field id not found in schema",

Review Comment:
```suggestion
                            "Field id not found in arrow schema metadata.",
```
########## crates/iceberg/src/writer/file_writer/parquet_writer.rs: ##########

+/// ParquetWriterBuilder is used to build a [`ParquetWriter`].
+#[derive(Clone)]
+pub struct ParquetWriterBuilder {
+    /// `buffer_size` determines the initial size of the intermediate buffer.
+    /// The intermediate buffer will automatically be resized if necessary.
+    init_buffer_size: usize,
+    props: WriterProperties,
+    schema: SchemaRef,

Review Comment:
To improve readability, I would suggest using the alias `ArrowSchemaRef` here.
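As a sketch of that suggestion; where the alias would live is an open choice, and `ArrowSchemaRef` is only the name proposed in the review, not something this PR defines:

```rust
// Sketch: re-export arrow's SchemaRef under a name that makes its origin obvious.
pub type ArrowSchemaRef = arrow_schema::SchemaRef;

pub struct ParquetWriterBuilder {
    init_buffer_size: usize,
    props: parquet::file::properties::WriterProperties,
    // The alias signals that this is an Arrow schema, not an Iceberg schema.
    schema: ArrowSchemaRef,
    // ... file_io and location_generator fields as in the PR
}
```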
########## crates/iceberg/src/writer/file_writer/parquet_writer.rs: ##########

+        let writer = AsyncArrowWriter::try_new(
+            inner_writer,
+            self.schema.clone(),
+            init_buffer_size,
+            Some(self.props),
+        )
+        .map_err(|err| {
+            Error::new(
+                crate::ErrorKind::Unexpected,
+                "build error from parquet writer",

Review Comment:
```suggestion
                "Failed to build parquet writer",
```

########## crates/iceberg/src/writer/file_writer/location_generator.rs: ##########

+//! This module contains the location generator for the file writer.
+
+use std::str::FromStr;
+use std::sync::atomic::AtomicUsize;
+use uuid::Uuid;
+
+use crate::spec::{DataFileFormat, TableMetadata};
+use crate::{Error, ErrorKind, Result};
+
+/// LocationGenerator will generate a file location for the writer.
+pub trait LocationGenerator: Send + Sync + 'static {
+    /// Generate a related file location for the writer.

Review Comment:
This comment is a little confusing: is the result a full path, or a path relative to the table location?
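If the result is in fact a full path, which matches how `build` passes it straight to `FileIO::new_output` in the parquet_writer hunks, the doc could say so explicitly. A sketch of the clarified wording, with the `String` return type assumed from the `generate_name` call site:

```rust
/// `LocationGenerator` generates a file location for the writer.
pub trait LocationGenerator: Send + Sync + 'static {
    /// Generate a full file location for the writer, e.g.
    /// "s3://bucket/table/data/00000-0-<uuid>.parquet", suitable to hand
    /// directly to `FileIO::new_output`. It is not relative to the table
    /// location.
    fn generate_name(&self) -> String;
}
```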
########## crates/iceberg/src/writer/file_writer/parquet_writer.rs: ##########

+        Ok(ParquetWriter {
+            writer,
+            written_size,
+            current_row_num: 0,
+            out_file,
+            file_io: self.file_io,
+            field_ids,
+        })
+    }
+}
+
+/// `ParquetWriter` is used to write arrow data into a parquet file on storage.
+pub struct ParquetWriter {

Review Comment:
Is it possible to make `ParquetWriter<F: FileWriter>`? I think the concrete type is hidden under the `FileWriter` trait anyway?
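One reading of that suggestion, sketched with the output stream as the type parameter; the `AsyncWrite` bound mirrors what `AsyncArrowWriter` requires, and the field set is abbreviated:

```rust
use std::sync::{atomic::AtomicI64, Arc};

use parquet::arrow::AsyncArrowWriter;
use tokio::io::AsyncWrite;

// Sketch: parameterize ParquetWriter over its output stream instead of
// hard-coding TrackWriter. Callers that only see the builder's associated
// type `R` as `impl FileWriter` never observe the parameter.
pub struct ParquetWriter<W: AsyncWrite + Unpin + Send> {
    writer: AsyncArrowWriter<W>,
    written_size: Arc<AtomicI64>,
    current_row_num: usize,
    field_ids: Vec<i32>,
    // ... out_file and file_io fields as in the PR
}
```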
########## crates/iceberg/src/writer/file_writer/location_generator.rs: ##########

+/// LocationGenerator will generate a file location for the writer.
+pub trait LocationGenerator: Send + Sync + 'static {
+    /// Generate a related file location for the writer.

Review Comment:
Is this similar to `LocationProvider` in the Java API?

########## crates/iceberg/src/writer/file_writer/parquet_writer.rs: ##########

+/// ParquetWriterBuilder is used to build a [`ParquetWriter`].
+#[derive(Clone)]
+pub struct ParquetWriterBuilder {
+    /// `buffer_size` determines the initial size of the intermediate buffer.
+    /// The intermediate buffer will automatically be resized if necessary.
+    init_buffer_size: usize,
+    props: WriterProperties,
+    schema: SchemaRef,
+
+    file_io: FileIO,
+    location_generator: Arc<dyn LocationGenerator>,

Review Comment:
Is it possible to make `ParquetWriterBuilder` generic over `LocationGenerator`?
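A minimal sketch of the generic form, with static dispatch in place of the `Arc<dyn ...>`; the unrelated fields and constructor parameters are abbreviated, and the PR's `LocationGenerator` trait is assumed in scope:

```rust
// Sketch: the builder owns a concrete L instead of Arc<dyn LocationGenerator>,
// trading trait-object indirection for a type parameter. Note that
// #[derive(Clone)] then requires L: Clone (or keeping an Arc internally).
#[derive(Clone)]
pub struct ParquetWriterBuilder<L: LocationGenerator + Clone> {
    init_buffer_size: usize,
    location_generator: L,
    // ... props, schema, and file_io fields as in the PR
}

impl<L: LocationGenerator + Clone> ParquetWriterBuilder<L> {
    pub fn new(init_buffer_size: usize, location_generator: L) -> Self {
        Self {
            init_buffer_size,
            location_generator,
        }
    }
}
```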
########## crates/iceberg/src/writer/file_writer/parquet_writer.rs: ##########

+        let written_size = Arc::new(AtomicI64::new(0));
+        let file_path = self.location_generator.generate_name();
+        let out_file = self.file_io.new_output(file_path)?;
+        let inner_writer = TrackWriter::new(out_file.writer().await?, written_size.clone());
+        let init_buffer_size = if self.init_buffer_size < Self::MIN_BUFFER_SIZE {

Review Comment:
```suggestion
        let init_buffer_size = max(Self::MIN_BUFFER_SIZE, self.init_buffer_size);
```
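For the suggested line to compile, `max` needs to be brought into scope with `use std::cmp::max;` at the top of the module. A self-contained sketch of the same computation:

```rust
use std::cmp::max;

// Equivalent to the suggested line: max() replaces the if/else branch while
// keeping the 8MB floor imposed by MIN_BUFFER_SIZE.
fn effective_buffer_size(requested: usize) -> usize {
    const MIN_BUFFER_SIZE: usize = 8 * 1024 * 1024;
    max(MIN_BUFFER_SIZE, requested)
}
```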
########## crates/iceberg/src/writer/file_writer/parquet_writer.rs: ##########

+                    .parse::<i32>()
+                    .map_err(|err| {
+                        Error::new(
+                            crate::ErrorKind::UnexpectedParameter,
+                            "field id parse error",

Review Comment:
```suggestion
                            "Failed to parse field id",
```

########## crates/iceberg/src/writer/file_writer/track_writer.rs: ##########

+use std::{
+    pin::Pin,
+    sync::{atomic::AtomicI64, Arc},
+};
+
+use tokio::io::AsyncWrite;
+
+use crate::io::FileWrite;
+
+/// `TrackWriter` is used to track the written size.
+pub struct TrackWriter {

Review Comment:
Do we need to expose this? I think `pub(crate)` would be enough?
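Concretely, the visibility change is a single keyword; a sketch, with the inner writer field elided since the quoted diff cuts off before it:

```rust
use std::sync::{atomic::AtomicI64, Arc};

// Sketch: same struct, crate-private. Call sites inside the crate
// (parquet_writer.rs wraps out_file.writer() in one) are unaffected, but
// TrackWriter no longer appears in the crate's public API.
pub(crate) struct TrackWriter {
    written_size: Arc<AtomicI64>,
    // ... inner FileWrite field as in the PR
}
```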
########## crates/iceberg/src/writer/file_writer/parquet_writer.rs: ##########

+/// `ParquetWriter` is used to write arrow data into a parquet file on storage.
+pub struct ParquetWriter {
+    out_file: OutputFile,
+    writer: AsyncArrowWriter<TrackWriter>,
+    written_size: Arc<AtomicI64>,
+    current_row_num: usize,
+    file_io: FileIO,
+    field_ids: Vec<i32>,
+}
+
+impl ParquetWriter {
+    fn to_data_file_builder(
+        field_ids: &[i32],
+        metadata: FileMetaData,
+        written_size: usize,
+        file_path: String,
+    ) -> Result<DataFileBuilder> {
+        // Only enter here when the file is not empty.
+        assert!(!metadata.row_groups.is_empty());
+        if field_ids.len() != metadata.row_groups[0].columns.len() {
+            return Err(Error::new(
+                crate::ErrorKind::Unexpected,
+                "Length of field ids does not match the number of columns in parquet metadata",
+            ));
+        }
+
+        let (column_sizes, value_counts, null_value_counts) = {
+            let mut per_col_size: HashMap<i32, u64> = HashMap::new();
+            let mut per_col_val_num: HashMap<i32, u64> = HashMap::new();
+            let mut per_col_null_val_num: HashMap<i32, u64> = HashMap::new();
+            metadata.row_groups.iter().for_each(|group| {
+                group.columns.iter().zip(field_ids.iter()).for_each(
+                    |(column_chunk, &field_id)| {
+                        if let Some(column_chunk_metadata) = &column_chunk.meta_data {
+                            *per_col_size.entry(field_id).or_insert(0) +=
+                                column_chunk_metadata.total_compressed_size as u64;
+                            *per_col_val_num.entry(field_id).or_insert(0) +=
+                                column_chunk_metadata.num_values as u64;
+                            *per_col_null_val_num.entry(field_id).or_insert(0_u64) +=
+                                column_chunk_metadata
+                                    .statistics
+                                    .as_ref()
+                                    .map(|s| s.null_count)
+                                    .unwrap_or(None)
+                                    .unwrap_or(0) as u64;
+                        }
+                    },
+                )
+            });
+            (per_col_size, per_col_val_num, per_col_null_val_num)
+        };
+
+        let mut builder = DataFileBuilder::default();
+        builder
+            .file_path(file_path)
+            .file_format(DataFileFormat::Parquet)
+            .record_count(metadata.num_rows as u64)
+            .file_size_in_bytes(written_size as u64)
+            .column_sizes(column_sizes)
+            .value_counts(value_counts)
+            .null_value_counts(null_value_counts)
+            // # TODO
+            // - nan_value_counts
+            // - lower_bounds
+            // - upper_bounds
+            .key_metadata(metadata.footer_signing_key_metadata.unwrap_or_default())
+            .split_offsets(
+                metadata
+                    .row_groups
+                    .iter()
+                    .filter_map(|group| group.file_offset)
+                    .collect(),
+            );
+        Ok(builder)
+    }
+}
+
+impl FileWriter for ParquetWriter {
+    async fn write(&mut self, batch: &arrow_array::RecordBatch) -> crate::Result<()> {
+        self.current_row_num += batch.num_rows();
+        self.writer.write(batch).await.map_err(|err| {
+            Error::new(
+                crate::ErrorKind::Unexpected,
+                "write error from parquet writer",
+            )
+            .with_source(err)
+        })?;
+        Ok(())
+    }
+
+    async fn close(self) -> crate::Result<Vec<crate::spec::DataFileBuilder>> {
+        let metadata = self.writer.close().await.map_err(|err| {
+            Error::new(
+                crate::ErrorKind::Unexpected,
+                "close error from parquet writer",
+            )
+            .with_source(err)
+        })?;
+        if self.current_row_num == 0 {

Review Comment:
Should we move this check to the first part of this method?
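One way to act on that, sketched against the `close` body above. The quoted diff cuts off before the empty-file branch, so what it must ultimately do (e.g. remove the already-created output file) is unknown here and `Ok(vec![])` is only a placeholder:

```rust
// Inside `impl FileWriter for ParquetWriter`, sketch only: hoist the
// empty-file check ahead of closing the inner writer, so the empty case is
// handled up front rather than after the flush.
async fn close(self) -> crate::Result<Vec<crate::spec::DataFileBuilder>> {
    if self.current_row_num == 0 {
        // Placeholder: the PR's actual empty-file handling is not visible
        // in the quoted diff.
        return Ok(vec![]);
    }
    let metadata = self.writer.close().await.map_err(|err| {
        Error::new(
            crate::ErrorKind::Unexpected,
            "close error from parquet writer",
        )
        .with_source(err)
    })?;
    // ... then build the DataFileBuilder from `metadata` as the PR does.
    todo!()
}
```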