kevinjqliu commented on code in PR #1036:
URL: https://github.com/apache/iceberg-python/pull/1036#discussion_r1714447553


##########
tests/integration/test_add_files.py:
##########
@@ -732,3 +732,76 @@ def test_add_files_subset_of_schema(spark: SparkSession, session_catalog: Catalo
     for column in written_arrow_table.column_names:
         for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
             assert left == right
+
+
+@pytest.mark.integration
+def test_add_files_with_duplicate_files_in_file_paths(spark: SparkSession, session_catalog: Catalog, format_version: int) -> None:
+    identifier = f"default.test_table_duplicate_add_files_v{format_version}"
+    tbl = _create_table(session_catalog, identifier, format_version)
+
+    file_paths = [f"s3://warehouse/default/unpartitioned/v{format_version}/test-{i}.parquet" for i in range(5)]
+    file_paths.append(f"s3://warehouse/default/unpartitioned/v{format_version}/test-1.parquet")
+    # write parquet files
+    for file_path in file_paths:
+        fo = tbl.io.new_output(file_path)
+        with fo.create(overwrite=True) as fos:
+            with pq.ParquetWriter(fos, schema=ARROW_SCHEMA) as writer:
+                writer.write_table(ARROW_TABLE)
+
+    # add the parquet files as data files
+    with pytest.raises(ValueError) as exc_info:
+        tbl.add_files(file_paths=file_paths)
+    assert "File paths must be unique" in str(exc_info.value)

Review Comment:
   nit: if this test is checking for uniqueness of `file_paths`, perhaps just test specifically for that
   
   something like
   ```python
   file_path = ""
   file_paths = [file_path, file_path]

   # duplicate paths in `file_paths` should be rejected before anything is written
   with pytest.raises(ValueError) as exc_info:
       tbl.add_files(file_paths=file_paths)
   assert "File paths must be unique" in str(exc_info.value)
   ```
   
   



##########
tests/integration/test_add_files.py:
##########
@@ -732,3 +732,76 @@ def test_add_files_subset_of_schema(spark: SparkSession, session_catalog: Catalo
     for column in written_arrow_table.column_names:
         for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
             assert left == right
+
+
+@pytest.mark.integration
+def test_add_files_with_duplicate_files_in_file_paths(spark: SparkSession, session_catalog: Catalog, format_version: int) -> None:
+    identifier = f"default.test_table_duplicate_add_files_v{format_version}"
+    tbl = _create_table(session_catalog, identifier, format_version)
+
+    file_paths = [f"s3://warehouse/default/unpartitioned/v{format_version}/test-{i}.parquet" for i in range(5)]
+    file_paths.append(f"s3://warehouse/default/unpartitioned/v{format_version}/test-1.parquet")
+    # write parquet files
+    for file_path in file_paths:
+        fo = tbl.io.new_output(file_path)
+        with fo.create(overwrite=True) as fos:
+            with pq.ParquetWriter(fos, schema=ARROW_SCHEMA) as writer:
+                writer.write_table(ARROW_TABLE)
+
+    # add the parquet files as data files
+    with pytest.raises(ValueError) as exc_info:
+        tbl.add_files(file_paths=file_paths)
+    assert "File paths must be unique" in str(exc_info.value)
+
+
+@pytest.mark.integration
+def test_add_files_that_referenced_by_current_snapshot(
+    spark: SparkSession, session_catalog: Catalog, format_version: int
+) -> None:
+    identifier = f"default.test_table_add_referenced_file_v{format_version}"
+    tbl = _create_table(session_catalog, identifier, format_version)
+
+    file_paths = [f"s3://warehouse/default/unpartitioned/v{format_version}/test-{i}.parquet" for i in range(5)]
+    referenced_file = f"s3://warehouse/default/unpartitioned/v{format_version}/test-1.parquet"
+    # write parquet files
+    for file_path in file_paths:
+        fo = tbl.io.new_output(file_path)
+        with fo.create(overwrite=True) as fos:
+            with pq.ParquetWriter(fos, schema=ARROW_SCHEMA) as writer:
+                writer.write_table(ARROW_TABLE)
+
+    # add the parquet files as data files
+    tbl.add_files(file_paths=file_paths)
+
+    with pytest.raises(ValueError) as exc_info:
+        tbl.add_files(file_paths=[referenced_file])
+    assert f"Cannot add files that are already referenced by table, files: 
{referenced_file}" in str(exc_info.value)

Review Comment:
   nit: it's not clear to me how `referenced_file` is already "referenced"; the connection to `file_paths` is implicit in two separate f-strings happening to produce the same path.
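   
   e.g. pulling it straight out of `file_paths` would make the relationship explicit (a quick sketch, reusing the names from the test above):
   
   ```python
   file_paths = [f"s3://warehouse/default/unpartitioned/v{format_version}/test-{i}.parquet" for i in range(5)]
   # take one of the paths that add_files() registers below, so the
   # "already referenced" relationship is visible in the test itself
   referenced_file = file_paths[1]
   ```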



##########
tests/integration/test_add_files.py:
##########
@@ -732,3 +732,76 @@ def test_add_files_subset_of_schema(spark: SparkSession, session_catalog: Catalo
     for column in written_arrow_table.column_names:
         for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
             assert left == right
+
+
+@pytest.mark.integration
+def test_add_files_with_duplicate_files_in_file_paths(spark: SparkSession, session_catalog: Catalog, format_version: int) -> None:
+    identifier = f"default.test_table_duplicate_add_files_v{format_version}"
+    tbl = _create_table(session_catalog, identifier, format_version)
+
+    file_paths = [f"s3://warehouse/default/unpartitioned/v{format_version}/test-{i}.parquet" for i in range(5)]
+    file_paths.append(f"s3://warehouse/default/unpartitioned/v{format_version}/test-1.parquet")
+    # write parquet files
+    for file_path in file_paths:
+        fo = tbl.io.new_output(file_path)
+        with fo.create(overwrite=True) as fos:
+            with pq.ParquetWriter(fos, schema=ARROW_SCHEMA) as writer:
+                writer.write_table(ARROW_TABLE)
+
+    # add the parquet files as data files
+    with pytest.raises(ValueError) as exc_info:
+        tbl.add_files(file_paths=file_paths)
+    assert "File paths must be unique" in str(exc_info.value)
+
+
+@pytest.mark.integration
+def test_add_files_that_referenced_by_current_snapshot(
+    spark: SparkSession, session_catalog: Catalog, format_version: int
+) -> None:
+    identifier = f"default.test_table_add_referenced_file_v{format_version}"
+    tbl = _create_table(session_catalog, identifier, format_version)
+
+    file_paths = [f"s3://warehouse/default/unpartitioned/v{format_version}/test-{i}.parquet" for i in range(5)]
+    referenced_file = f"s3://warehouse/default/unpartitioned/v{format_version}/test-1.parquet"
+    # write parquet files
+    for file_path in file_paths:
+        fo = tbl.io.new_output(file_path)
+        with fo.create(overwrite=True) as fos:
+            with pq.ParquetWriter(fos, schema=ARROW_SCHEMA) as writer:
+                writer.write_table(ARROW_TABLE)
+
+    # add the parquet files as data files
+    tbl.add_files(file_paths=file_paths)
+
+    with pytest.raises(ValueError) as exc_info:
+        tbl.add_files(file_paths=[referenced_file])
+    assert f"Cannot add files that are already referenced by table, files: 
{referenced_file}" in str(exc_info.value)
+
+
+@pytest.mark.integration
+def test_add_files_that_referenced_by_current_snapshot_with_check_duplicate_files_false(

Review Comment:
   nit: split this into 2 tests: one for the happy path, another for `check_duplicate_files=False`.
   
   What happens when `check_duplicate_files=False` is set and we add files that are already referenced? Does that leave the table in an inconsistent (bad) state?
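   
   a rough sketch of the split, reusing this file's `_create_table` / `ARROW_SCHEMA` / `ARROW_TABLE` helpers and the `check_duplicate_files` flag this PR adds (`_write_parquet_files` is a hypothetical helper, just to avoid repeating the write loop):
   
   ```python
   def _write_parquet_files(tbl, file_paths):
       # hypothetical helper: write one small parquet file at each path
       for file_path in file_paths:
           fo = tbl.io.new_output(file_path)
           with fo.create(overwrite=True) as fos:
               with pq.ParquetWriter(fos, schema=ARROW_SCHEMA) as writer:
                   writer.write_table(ARROW_TABLE)
   
   
   @pytest.mark.integration
   def test_add_files_referenced_by_current_snapshot(
       spark: SparkSession, session_catalog: Catalog, format_version: int
   ) -> None:
       identifier = f"default.test_table_add_referenced_file_v{format_version}"
       tbl = _create_table(session_catalog, identifier, format_version)
       file_paths = [f"s3://warehouse/default/unpartitioned/v{format_version}/test-{i}.parquet" for i in range(5)]
       _write_parquet_files(tbl, file_paths)
       tbl.add_files(file_paths=file_paths)
   
       # re-adding a file the current snapshot already references should raise
       with pytest.raises(ValueError) as exc_info:
           tbl.add_files(file_paths=[file_paths[1]])
       assert "Cannot add files that are already referenced by table" in str(exc_info.value)
   
   
   @pytest.mark.integration
   def test_add_files_referenced_by_current_snapshot_with_check_duplicate_files_false(
       spark: SparkSession, session_catalog: Catalog, format_version: int
   ) -> None:
       identifier = f"default.test_table_add_referenced_file_no_check_v{format_version}"
       tbl = _create_table(session_catalog, identifier, format_version)
       file_paths = [f"s3://warehouse/default/unpartitioned/v{format_version}/test-{i}.parquet" for i in range(5)]
       _write_parquet_files(tbl, file_paths)
       tbl.add_files(file_paths=file_paths)
   
       # with the check disabled the call is expected to succeed; worth asserting
       # what the table looks like afterwards (e.g. duplicated rows on read?)
       tbl.add_files(file_paths=[file_paths[1]], check_duplicate_files=False)
   ```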
   
   
   



##########
tests/integration/test_add_files.py:
##########
@@ -732,3 +732,47 @@ def test_add_files_subset_of_schema(spark: SparkSession, session_catalog: Catalo
     for column in written_arrow_table.column_names:
         for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
             assert left == right
+
+
+@pytest.mark.integration
+def test_add_files_with_duplicate_files_in_file_paths(spark: SparkSession, session_catalog: Catalog, format_version: int) -> None:

Review Comment:
   nit: I think the parametrization for `format_version: int` is missing
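   
   e.g. (assuming the `[1, 2]` values used for `format_version` elsewhere in this suite):
   
   ```python
   @pytest.mark.integration
   @pytest.mark.parametrize("format_version", [1, 2])
   def test_add_files_with_duplicate_files_in_file_paths(
       spark: SparkSession, session_catalog: Catalog, format_version: int
   ) -> None:
       ...
   ```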



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

