HonahX commented on code in PR #921: URL: https://github.com/apache/iceberg-python/pull/921#discussion_r1676549791
########## pyiceberg/io/pyarrow.py: ########## @@ -2079,36 +2083,63 @@ def _check_schema_compatible(table_schema: Schema, other_schema: pa.Schema, down Raises: ValueError: If the schemas are not compatible. """ - name_mapping = table_schema.name_mapping - try: - task_schema = pyarrow_to_schema( - other_schema, name_mapping=name_mapping, downcast_ns_timestamp_to_us=downcast_ns_timestamp_to_us - ) - except ValueError as e: - other_schema = _pyarrow_to_schema_without_ids(other_schema, downcast_ns_timestamp_to_us=downcast_ns_timestamp_to_us) - additional_names = set(other_schema.column_names) - set(table_schema.column_names) - raise ValueError( - f"PyArrow table contains more columns: {', '.join(sorted(additional_names))}. Update the schema first (hint, use union_by_name)." - ) from e - - if table_schema.as_struct() != task_schema.as_struct(): - from rich.console import Console - from rich.table import Table as RichTable + task_schema = assign_fresh_schema_ids( + _pyarrow_to_schema_without_ids(other_schema, downcast_ns_timestamp_to_us=downcast_ns_timestamp_to_us) + ) - console = Console(record=True) + extra_fields = task_schema.field_names - table_schema.field_names + missing_fields = table_schema.field_names - task_schema.field_names + fields_in_both = task_schema.field_names.intersection(table_schema.field_names) + + from rich.console import Console + from rich.table import Table as RichTable + + console = Console(record=True) + + rich_table = RichTable(show_header=True, header_style="bold") + rich_table.add_column("Field Name") + rich_table.add_column("Category") + rich_table.add_column("Table field") + rich_table.add_column("Dataframe field") + + def print_nullability(required: bool) -> str: + return "required" if required else "optional" + + for field_name in fields_in_both: + lhs = table_schema.find_field(field_name) + rhs = task_schema.find_field(field_name) + # Check nullability + if lhs.required != rhs.required: + rich_table.add_row( + field_name, + 
"Nullability", + f"{print_nullability(lhs.required)} {str(lhs.field_type)}", + f"{print_nullability(rhs.required)} {str(rhs.field_type)}", + ) + # Check if type is consistent + if any( + (isinstance(lhs.field_type, container_type) and isinstance(rhs.field_type, container_type)) + for container_type in {StructType, MapType, ListType} + ): + continue + elif lhs.field_type != rhs.field_type: + rich_table.add_row( + field_name, + "Type", + f"{print_nullability(lhs.required)} {str(lhs.field_type)}", + f"{print_nullability(rhs.required)} {str(rhs.field_type)}", + ) Review Comment: I am thinking if we can be less restrictive on type. If the rhs's type can be [promoted](https://github.com/apache/iceberg-python/blob/cf3bf8a977f80f986237bc62293666de327871b3/pyiceberg/schema.py#L1550-L1564) to lhs's type, the case may still be considered as compatible: ```python elif lhs.field_type != rhs.field_type: try: promote(rhs.field_type, lhs.field_type) except ResolveError: rich_table.add_row( field_name, "Type", f"{print_nullability(lhs.required)} {str(lhs.field_type)}", f"{print_nullability(rhs.required)} {str(rhs.field_type)}", ) ``` <details> <summary>Example Test case</summary> ```python def test_schema_uuid() -> None: table_schema = Schema( NestedField(field_id=1, name="foo", field_type=StringType(), required=False), NestedField(field_id=2, name="bar", field_type=IntegerType(), required=True), NestedField(field_id=3, name="baz", field_type=UUIDType(), required=False), schema_id=1, identifier_field_ids=[2], ) other_schema = pa.schema(( pa.field("foo", pa.large_string(), nullable=True), pa.field("bar", pa.int32(), nullable=False), pa.field("baz", pa.binary(16), nullable=True), )) _check_schema_compatible(table_schema, other_schema) other_schema_fail = pa.schema(( pa.field("foo", pa.large_string(), nullable=True), pa.field("bar", pa.int32(), nullable=False), pa.field("baz", pa.binary(15), nullable=True), )) with pytest.raises(ValueError): _check_schema_compatible(table_schema, 
other_schema_fail) ``` </details> This could be a possible solution for https://github.com/apache/iceberg-python/issues/855, and should also cover the situation of writing pa.int32() (IntegerType) to LongType ########## tests/integration/test_writes/test_writes.py: ########## @@ -964,18 +964,38 @@ def test_sanitize_character_partitioned(catalog: Catalog) -> None: assert len(tbl.scan().to_arrow()) == 22 +@pytest.mark.integration @pytest.mark.parametrize("format_version", [1, 2]) -def table_write_subset_of_schema(session_catalog: Catalog, arrow_table_with_null: pa.Table, format_version: int) -> None: - identifier = "default.table_append_subset_of_schema" +def test_table_write_subset_of_schema(session_catalog: Catalog, arrow_table_with_null: pa.Table, format_version: int) -> None: + identifier = "default.test_table_write_subset_of_schema" tbl = _create_table(session_catalog, identifier, {"format-version": format_version}, [arrow_table_with_null]) arrow_table_without_some_columns = arrow_table_with_null.combine_chunks().drop(arrow_table_with_null.column_names[0]) + print(arrow_table_without_some_columns.schema) + print(arrow_table_with_null.schema) Review Comment: ```suggestion ``` -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org For additional commands, e-mail: issues-h...@iceberg.apache.org