fvaleye commented on code in PR #1602:
URL: https://github.com/apache/iceberg-rust/pull/1602#discussion_r2375802259
##########
crates/integrations/datafusion/src/physical_plan/project.rs:
##########
@@ -0,0 +1,511 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Utilities for calculating partition values for Iceberg tables.
+//!
+//! This module provides functions to calculate partition values from record batches
+//! based on Iceberg partition specifications. These utilities are used when writing
+//! data to partitioned Iceberg tables.
+
+use std::sync::Arc;
+
+use datafusion::arrow::array::{ArrayRef, RecordBatch, StructArray};
+use datafusion::arrow::datatypes::{
+    DataType, Field, Schema as ArrowSchema, SchemaRef as ArrowSchemaRef,
+};
+use datafusion::common::Result as DFResult;
+use datafusion::error::DataFusionError;
+use iceberg::spec::{PartitionSpec, Schema};
+
+use crate::to_datafusion_error;
+
+/// Column name for the combined partition values struct
+#[allow(dead_code)]
+pub(crate) const PARTITION_VALUES_COLUMN: &str = "_iceberg_partition_values";
+
+/// Create an output schema by adding a single partition values struct column to the input schema.
+/// Returns the original schema unchanged if the table is unpartitioned.
+#[allow(dead_code)]
+pub(crate) fn create_schema_with_partition_columns(
+    input_schema: &ArrowSchema,
+    partition_spec: &PartitionSpec,
+    table_schema: &Schema,
+) -> DFResult<ArrowSchemaRef> {
+    if partition_spec.is_unpartitioned() {
+        return Ok(Arc::new(input_schema.clone()));
+    }
+
+    let mut fields: Vec<Arc<Field>> = input_schema.fields().to_vec();
+
+    let partition_struct_type = partition_spec
+        .partition_type(table_schema)
+        .map_err(to_datafusion_error)?;
+
+    let arrow_struct_type =
+        iceberg::arrow::type_to_arrow_type(&iceberg::spec::Type::Struct(partition_struct_type))
+            .map_err(to_datafusion_error)?;
+
+    fields.push(Arc::new(Field::new(
+        PARTITION_VALUES_COLUMN,
+        arrow_struct_type,
+        false, // Partition values are generally not null
+    )));
+
+    Ok(Arc::new(ArrowSchema::new(fields)))
+}
+
+/// Calculate partition values for a record batch and return as a single struct array.
+/// Returns None if the table is unpartitioned.
+///
+/// # Arguments
+/// * `batch` - The record batch to calculate partition values for
+/// * `partition_spec` - The partition specification defining the partition fields
+/// * `table_schema` - The Iceberg table schema
+/// * `expected_partition_type` - The expected Arrow struct type for the partition values
+#[allow(dead_code)]
+pub(crate) fn calculate_partition_values(

Review Comment:
   Yes, I will implement the `PhysicalExpr` and create a main entry point `fn project_with_partition(input: &ExecutionPlan, table: &Table) -> Result<Arc<dyn ExecutionPlan>>` for it 👍
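   For context, here is a minimal self-contained sketch of the batch-level idea this module is built around: compute partition values for a `RecordBatch` and append them as one struct column, mirroring the `_iceberg_partition_values` layout above. Everything beyond what the diff shows is an assumption: the `append_partition_struct` helper and the `id` source column are hypothetical, and an identity transform stands in for the transforms a real `PartitionSpec` would apply. It uses only the arrow APIs the PR already imports via `datafusion::arrow`.

   ```rust
   // Sketch only, not the PR's implementation: identity-transform one
   // hypothetical `id` column and append the result as a struct column.
   use std::sync::Arc;

   use datafusion::arrow::array::{ArrayRef, Int64Array, RecordBatch, StructArray};
   use datafusion::arrow::datatypes::{DataType, Field, Schema};
   use datafusion::arrow::error::ArrowError;

   fn append_partition_struct(batch: &RecordBatch) -> Result<RecordBatch, ArrowError> {
       // Identity transform: the partition value is the source column itself.
       // A real implementation would apply the PartitionSpec's transform here.
       let source: ArrayRef = batch
           .column_by_name("id")
           .expect("hypothetical `id` source column")
           .clone();

       // Wrap the partition value(s) in a single struct column, matching the
       // combined PARTITION_VALUES_COLUMN layout from the diff above.
       let partition_field = Arc::new(Field::new("id", DataType::Int64, false));
       let partition_struct: ArrayRef =
           Arc::new(StructArray::from(vec![(partition_field.clone(), source)]));

       // Extend both the schema and the column list with the new struct column.
       let mut fields: Vec<Arc<Field>> = batch.schema().fields().to_vec();
       fields.push(Arc::new(Field::new(
           "_iceberg_partition_values",
           DataType::Struct(vec![partition_field].into()),
           false, // partition values are not null here
       )));
       let mut columns = batch.columns().to_vec();
       columns.push(partition_struct);

       RecordBatch::try_new(Arc::new(Schema::new(fields)), columns)
   }

   fn main() -> Result<(), ArrowError> {
       let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int64, false)]));
       let batch = RecordBatch::try_new(
           schema,
           vec![Arc::new(Int64Array::from(vec![1_i64, 2, 3])) as ArrayRef],
       )?;
       let projected = append_partition_struct(&batch)?;
       assert_eq!(projected.num_columns(), 2);
       Ok(())
   }
   ```

   Per the review comment, the PR plans to route this per-batch logic through a `PhysicalExpr` evaluated by the `project_with_partition` entry point, rather than exposing a free function like the sketch's helper.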
