kevinjqliu commented on code in PR #1345:
URL: https://github.com/apache/iceberg-python/pull/1345#discussion_r1896992186


##########
tests/integration/test_writes/test_partitioned_writes.py:
##########
@@ -719,50 +719,105 @@ def test_invalid_arguments(spark: SparkSession, session_catalog: Catalog) -> None
 @pytest.mark.parametrize(
     "spec",
     [
-        # mixed with non-identity is not supported
-        (
-            PartitionSpec(
-                PartitionField(source_id=4, field_id=1001, transform=BucketTransform(2), name="int_bucket"),
-                PartitionField(source_id=1, field_id=1002, transform=IdentityTransform(), name="bool"),
-            )
-        ),
-        # none of non-identity is supported
-        (PartitionSpec(PartitionField(source_id=4, field_id=1001, transform=BucketTransform(2), name="int_bucket"))),
-        (PartitionSpec(PartitionField(source_id=5, field_id=1001, transform=BucketTransform(2), name="long_bucket"))),
-        (PartitionSpec(PartitionField(source_id=10, field_id=1001, transform=BucketTransform(2), name="date_bucket"))),
-        (PartitionSpec(PartitionField(source_id=8, field_id=1001, transform=BucketTransform(2), name="timestamp_bucket"))),
-        (PartitionSpec(PartitionField(source_id=9, field_id=1001, transform=BucketTransform(2), name="timestamptz_bucket"))),
-        (PartitionSpec(PartitionField(source_id=2, field_id=1001, transform=BucketTransform(2), name="string_bucket"))),
-        (PartitionSpec(PartitionField(source_id=12, field_id=1001, transform=BucketTransform(2), name="fixed_bucket"))),
-        (PartitionSpec(PartitionField(source_id=11, field_id=1001, transform=BucketTransform(2), name="binary_bucket"))),
         (PartitionSpec(PartitionField(source_id=4, field_id=1001, transform=TruncateTransform(2), name="int_trunc"))),
         (PartitionSpec(PartitionField(source_id=5, field_id=1001, transform=TruncateTransform(2), name="long_trunc"))),
         (PartitionSpec(PartitionField(source_id=2, field_id=1001, transform=TruncateTransform(2), name="string_trunc"))),
-        (PartitionSpec(PartitionField(source_id=11, field_id=1001, transform=TruncateTransform(2), name="binary_trunc"))),
     ],
 )
-def test_unsupported_transform(
-    spec: PartitionSpec, spark: SparkSession, session_catalog: Catalog, arrow_table_with_null: pa.Table
+@pytest.mark.parametrize("format_version", [1, 2])
+def test_truncate_transform(
+    spec: PartitionSpec,
+    spark: SparkSession,
+    session_catalog: Catalog,
+    arrow_table_with_null: pa.Table,
+    format_version: int,
 ) -> None:
-    identifier = "default.unsupported_transform"
+    identifier = "default.truncate_transform"
 
     try:
         session_catalog.drop_table(identifier=identifier)
     except NoSuchTableError:
         pass
 
-    tbl = session_catalog.create_table(
+    tbl = _create_table(
+        session_catalog=session_catalog,
         identifier=identifier,
-        schema=TABLE_SCHEMA,
+        properties={"format-version": str(format_version)},
+        data=[arrow_table_with_null],
         partition_spec=spec,
-        properties={"format-version": "1"},
     )
 
-    with pytest.raises(
-        ValueError,
-        match="Not all partition types are supported for writes. Following partitions cannot be written using pyarrow: *",
-    ):
-        tbl.append(arrow_table_with_null)
+    assert tbl.format_version == format_version, f"Expected v{format_version}, got: v{tbl.format_version}"
+    df = spark.table(identifier)
+    assert df.count() == 3, f"Expected 3 total rows for {identifier}"
+    for col in arrow_table_with_null.column_names:
+        assert df.where(f"{col} is not null").count() == 2, f"Expected 2 non-null rows for {col}"
+        assert df.where(f"{col} is null").count() == 1, f"Expected 1 null row for {col} is null"
+
+    assert tbl.inspect.partitions().num_rows == 3
+    files_df = spark.sql(
+        f"""
+            SELECT *
+            FROM {identifier}.files
+        """
+    )
+    assert files_df.count() == 3
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize(
+    "spec, expected_rows",
+    [
+        # none of non-identity is supported

Review Comment:
   ```suggestion
   ```



##########
tests/test_transforms.py:
##########
@@ -1563,3 +1561,43 @@ def test_ymd_pyarrow_transforms(
     else:
         with pytest.raises(ValueError):
             transform.pyarrow_transform(DateType())(arrow_table_date_timestamps[source_col])
+
+
+@pytest.mark.parametrize(
+    "source_type, input_arr, expected, num_buckets",
+    [
+        (IntegerType(), pa.array([1, 2]), pa.array([6, 2], type=pa.int32()), 10),
+        (
+            IntegerType(),
+            pa.chunked_array([pa.array([1, 2]), pa.array([3, 4])]),
+            pa.chunked_array([pa.array([6, 2], type=pa.int32()), pa.array([5, 0], type=pa.int32())]),
+            10,
+        ),
+        (IntegerType(), pa.array([1, 2]), pa.array([6, 2], type=pa.int32()), 10),
+    ],
+    ],
+)
+def test_bucket_pyarrow_transforms(
+    source_type: PrimitiveType,
+    input_arr: Union[pa.Array, pa.ChunkedArray],
+    expected: Union[pa.Array, pa.ChunkedArray],
+    num_buckets: int,

Review Comment:
   nit: what do you think of reordering these for readability? `num_buckets`, `source_type`, and `input_arr` are configs of the BucketTransform; `expected` is the output


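For readers puzzling over the expected values in the parametrization above (inputs `[1, 2]` mapping to buckets `[6, 2]` with `num_buckets=10`), here is a minimal sketch of Iceberg-style integer bucketing, assuming the `mmh3` package is available; it illustrates the spec's hash, not the code under review:

```python
import struct

import mmh3  # murmur3_x86_32, the hash the Iceberg spec uses for bucketing


def bucket_int(value: int, num_buckets: int) -> int:
    # Per the Iceberg spec, int/long values are hashed as 8-byte little-endian
    # longs, masked to a non-negative 32-bit value, then taken modulo the
    # bucket count.
    hashed = mmh3.hash(struct.pack("<q", value), seed=0)
    return (hashed & 0x7FFFFFFF) % num_buckets


# Should line up with the expected values in the test data above: 6 and 2.
print(bucket_int(1, 10), bucket_int(2, 10))
```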

-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

