koenvo commented on code in PR #1878: URL: https://github.com/apache/iceberg-python/pull/1878#discussion_r2059189751
########## pyiceberg/table/upsert_util.py: ########## @@ -71,25 +72,59 @@ def get_rows_to_update(source_table: pa.Table, target_table: pa.Table, join_cols # When the target table is empty, there is nothing to update :) return source_table.schema.empty_table() - diff_expr = functools.reduce( - operator.or_, - [ - pc.or_kleene( - pc.not_equal(pc.field(f"{col}-lhs"), pc.field(f"{col}-rhs")), - pc.is_null(pc.not_equal(pc.field(f"{col}-lhs"), pc.field(f"{col}-rhs"))), - ) - for col in non_key_cols - ], + # We need to compare non_key_cols in Python as PyArrow + # 1. Cannot do a join when non-join columns have complex types + # 2. Cannot compare columns with complex types + # See: https://github.com/apache/arrow/issues/35785 + MARKER_COLUMN_NAME = "__from_target" + SOURCE_INDEX_COLUMN_NAME = "__source_index" + TARGET_INDEX_COLUMN_NAME = "__target_index" + + if MARKER_COLUMN_NAME in join_cols or SOURCE_INDEX_COLUMN_NAME in join_cols or TARGET_INDEX_COLUMN_NAME in join_cols: + raise ValueError( + f"{MARKER_COLUMN_NAME}, {SOURCE_INDEX_COLUMN_NAME} and {TARGET_INDEX_COLUMN_NAME} are reserved for joining " + f"DataFrames, and cannot be used as column names" + ) from None + + # Step 1: Prepare source index with join keys and a marker index + # Cast to target table schema, so we can do the join + # See: https://github.com/apache/arrow/issues/37542 + source_index = ( + source_table.cast(target_table.schema) + .select(join_cols_set) + .append_column(SOURCE_INDEX_COLUMN_NAME, pa.array(range(len(source_table)))) ) - return ( - source_table - # We already know that the schema is compatible, this is to fix large_ types - .cast(target_table.schema) - .join(target_table, keys=list(join_cols_set), join_type="inner", left_suffix="-lhs", right_suffix="-rhs") - .filter(diff_expr) - .drop_columns([f"{col}-rhs" for col in non_key_cols]) - .rename_columns({f"{col}-lhs" if col not in join_cols else col: col for col in source_table.column_names}) - # Finally cast to the original schema since it 
doesn't carry nullability: - # https://github.com/apache/arrow/issues/45557 ).cast(target_table.schema) + # Step 2: Prepare target index with join keys and a marker + target_index = ( target_table.select(join_cols_set) .append_column(TARGET_INDEX_COLUMN_NAME, pa.array(range(len(target_table)))) .append_column(MARKER_COLUMN_NAME, pa.repeat(True, len(target_table))) ) + + # Step 3: Perform a left outer join to find which rows from source exist in target + joined = source_index.join(target_index, keys=list(join_cols_set), join_type="left outer") Review Comment: Ah yes, good catch! We can do inner join **and** remove the `matching_indices = joined.filter(pc.field(MARKER_COLUMN_NAME))` step. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: issues-unsubscribe@iceberg.apache.org For additional commands, e-mail: issues-help@iceberg.apache.org