Fokko commented on code in PR #931:
URL: https://github.com/apache/iceberg-python/pull/931#discussion_r1829297150


##########
tests/integration/test_writes/test_partitioned_writes.py:
##########
@@ -181,6 +181,61 @@ def test_query_filter_appended_null_partitioned(
     assert len(rows) == 6
 
 
+@pytest.mark.integration
+@pytest.mark.parametrize(
+    "part_col",
+    [
+        "int",
+        "bool",
+        "string",
+        "string_long",
+        "long",
+        "float",
+        "double",
+        "date",
+        "timestamp",
+        "binary",
+        "timestamptz",
+    ],
+)
+@pytest.mark.parametrize(
+    "format_version",
+    [1, 2],
+)
+def test_query_filter_dynamic_partition_overwrite_null_partitioned(
+    session_catalog: Catalog, spark: SparkSession, arrow_table_with_null: pa.Table, part_col: str, format_version: int
+) -> None:
+    # Given
+    identifier = f"default.arrow_table_v{format_version}_appended_with_null_partitioned_on_col_{part_col}"
+    nested_field = TABLE_SCHEMA.find_field(part_col)
+    partition_spec = PartitionSpec(
+        PartitionField(source_id=nested_field.field_id, field_id=1001, transform=IdentityTransform(), name=part_col)
+    )
+
+    # When
+    tbl = _create_table(
+        session_catalog=session_catalog,
+        identifier=identifier,
+        properties={"format-version": str(format_version)},
+        data=[],
+        partition_spec=partition_spec,
+    )
+    # Append arrow_table_with_null (rows [A,B,C]) and then a doubled copy (rows [A,B,C,A,B,C])
+    tbl.append(arrow_table_with_null)
+    tbl.append(pa.concat_tables([arrow_table_with_null, arrow_table_with_null]))
+    tbl.dynamic_partition_overwrite(arrow_table_with_null)
+    tbl.dynamic_partition_overwrite(arrow_table_with_null.slice(0, 2))
+    # Then
+    assert tbl.format_version == format_version, f"Expected v{format_version}, got: v{tbl.format_version}"
+    df = spark.table(identifier)
+    for col in arrow_table_with_null.column_names:
+        assert df.where(f"{col} is not null").count() == 2, f"Expected 2 non-null rows for {col}"
+        assert df.where(f"{col} is null").count() == 1, f"Expected 1 null row for {col}"
+    # Expect 3 data files to remain: one per partition after the dynamic overwrites
+    rows = spark.sql(f"select partition from {identifier}.files").collect()
+    assert len(rows) == 3
+

Review Comment:
   ```python
   @pytest.mark.integration
   @pytest.mark.parametrize(
       "format_version",
       [1, 2],
   )
   @pytest.mark.filterwarnings("ignore")
   def test_dynamic_partition_overwrite_evolve_partition(
       spark: SparkSession, session_catalog: Catalog, format_version: int
   ) -> None:
       arrow_table = pa.Table.from_pydict(
           {
               "place": ["Amsterdam", "Drachten"],
               "inhabitants": [921402, 44940],
           },
       )
   
        identifier = f"default.partitioned_{format_version}_test_dynamic_partition_overwrite_evolve_partition"
        try:
            session_catalog.drop_table(identifier)
        except NoSuchTableError:  # from pyiceberg.exceptions
            pass
   
       tbl = session_catalog.create_table(
           identifier=identifier,
           schema=arrow_table.schema,
           properties={"format-version": str(format_version)},
        )

        with tbl.transaction() as tx:
            with tx.update_spec() as spec:
                spec.add_identity("place")
   
       tbl.append(arrow_table)
   
       with tbl.transaction() as tx:
           with tx.update_schema() as schema:
               schema.add_column("country", StringType())
            with tx.update_spec() as spec:
                spec.add_identity("country")
   
       arrow_table = pa.Table.from_pydict(
           {
               "place": ["Groningen"],
               "country": ["Netherlands"],
               "inhabitants": [238147],
           },
       )
   
       tbl.dynamic_partition_overwrite(arrow_table)
       result = tbl.scan().to_arrow()
   
        assert result['place'].to_pylist() == ['Groningen', 'Amsterdam', 'Drachten']
       assert result['inhabitants'].to_pylist() == [238147, 921402, 44940]
   ```
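
For context, a minimal standalone sketch of the behavior this suggested test exercises: `dynamic_partition_overwrite` (the API added in this PR) replaces only the partitions present in the incoming data and leaves every other partition untouched. The `SqlCatalog` with an in-memory SQLite URI, the scratch warehouse path, and the table name below are illustrative assumptions, not part of the PR.

```python
# A minimal sketch (not from the PR) of dynamic-partition-overwrite semantics.
# The SqlCatalog/SQLite/warehouse choices are assumptions for illustration.
import pyarrow as pa
from pyiceberg.catalog.sql import SqlCatalog

catalog = SqlCatalog(
    "local",
    uri="sqlite:///:memory:",
    warehouse="file:///tmp/iceberg-warehouse",
)
catalog.create_namespace("default")

data = pa.Table.from_pydict(
    {"place": ["Amsterdam", "Drachten"], "inhabitants": [921402, 44940]}
)
tbl = catalog.create_table("default.cities", schema=data.schema)

# Identity-partition the table on `place`, then load both rows.
with tbl.update_spec() as spec:
    spec.add_identity("place")
tbl.append(data)

# This overwrite touches only the "Amsterdam" partition; "Drachten" survives.
tbl.dynamic_partition_overwrite(
    pa.Table.from_pydict({"place": ["Amsterdam"], "inhabitants": [1000000]})
)
assert sorted(tbl.scan().to_arrow()["place"].to_pylist()) == ["Amsterdam", "Drachten"]
```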


