Elbehery commented on code in PR #497:
URL: https://github.com/apache/iceberg-go/pull/497#discussion_r2230977110


##########
tests/write_operations_test.go:
##########
@@ -0,0 +1,813 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package tests
+
+import (
+       "context"
+       "fmt"
+       "path/filepath"
+       "strconv"
+       "strings"
+       "testing"
+
+       "github.com/apache/arrow-go/v18/arrow"
+       "github.com/apache/arrow-go/v18/arrow/array"
+       "github.com/apache/arrow-go/v18/arrow/memory"
+       "github.com/apache/arrow-go/v18/parquet/pqarrow"
+       "github.com/apache/iceberg-go"
+       iceio "github.com/apache/iceberg-go/io"
+       "github.com/apache/iceberg-go/table"
+       "github.com/google/uuid"
+       "github.com/stretchr/testify/suite"
+)
+
+// Mock catalog implementation for testing
+type mockedCatalog struct{}
+
+func (m *mockedCatalog) LoadTable(ctx context.Context, ident table.Identifier, props iceberg.Properties) (*table.Table, error) {
+       return nil, nil
+}
+
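+// CommitTable applies the requested updates to an in-memory copy of the
+// table metadata and returns the result; nothing is persisted, which keeps
+// commits hermetic for tests.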
+func (m *mockedCatalog) CommitTable(ctx context.Context, tbl *table.Table, reqs []table.Requirement, updates []table.Update) (table.Metadata, string, error) {
+       bldr, err := table.MetadataBuilderFromBase(tbl.Metadata())
+       if err != nil {
+               return nil, "", err
+       }
+
+       for _, u := range updates {
+               if err := u.Apply(bldr); err != nil {
+                       return nil, "", err
+               }
+       }
+
+       meta, err := bldr.Build()
+       if err != nil {
+               return nil, "", err
+       }
+
+       return meta, "", nil
+}
+
+type WriteOperationsTestSuite struct {
+       suite.Suite
+       ctx         context.Context
+       location    string
+       tableSchema *iceberg.Schema
+       arrSchema   *arrow.Schema
+       arrTable    arrow.Table
+}
+
+func TestWriteOperations(t *testing.T) {
+       suite.Run(t, new(WriteOperationsTestSuite))
+}
+
+func (s *WriteOperationsTestSuite) SetupSuite() {
+       s.ctx = context.Background()
+       mem := memory.DefaultAllocator
+
+       // Create a test schema
+       s.tableSchema = iceberg.NewSchema(0,
+               iceberg.NestedField{ID: 1, Name: "id", Type: iceberg.PrimitiveTypes.Int64, Required: true},
+               iceberg.NestedField{ID: 2, Name: "data", Type: iceberg.PrimitiveTypes.String},
+               iceberg.NestedField{ID: 3, Name: "ts", Type: iceberg.PrimitiveTypes.Timestamp})
+
+       s.arrSchema = arrow.NewSchema([]arrow.Field{
+               {Name: "id", Type: arrow.PrimitiveTypes.Int64, Nullable: false},
+               {Name: "data", Type: arrow.BinaryTypes.String, Nullable: true},
+               {Name: "ts", Type: &arrow.TimestampType{Unit: arrow.Microsecond}, Nullable: true},
+       }, nil)
+
+       // Create test data
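+       // ts values are microseconds since the Unix epoch
+       // (1672531200000000 = 2023-01-01T00:00:00Z).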
+       var err error
+       s.arrTable, err = array.TableFromJSON(mem, s.arrSchema, []string{
+               `[
+                       {"id": 1, "data": "foo", "ts": 1672531200000000},
+                       {"id": 2, "data": "bar", "ts": 1672534800000000},
+                       {"id": 3, "data": "baz", "ts": 1672538400000000}
+               ]`,
+       })
+       s.Require().NoError(err)
+}
+
+func (s *WriteOperationsTestSuite) SetupTest() {
+       s.location = filepath.ToSlash(strings.Replace(s.T().TempDir(), "#", "", -1))
+}
+
+func (s *WriteOperationsTestSuite) TearDownSuite() {
+       s.arrTable.Release()
+}
+
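+// getMetadataLoc follows Iceberg's metadata file naming convention:
+// <location>/metadata/<zero-padded version>-<uuid>.metadata.json.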
+func (s *WriteOperationsTestSuite) getMetadataLoc() string {
+       return fmt.Sprintf("%s/metadata/%05d-%s.metadata.json",
+               s.location, 1, uuid.New().String())
+}
+
+func (s *WriteOperationsTestSuite) writeParquet(fio iceio.WriteFileIO, filePath string, arrTbl arrow.Table) {
+       fo, err := fio.Create(filePath)
+       s.Require().NoError(err)
+
+       s.Require().NoError(pqarrow.WriteTable(arrTbl, fo, arrTbl.NumRows(),
+               nil, pqarrow.DefaultWriterProps()))
+}
+
+func (s *WriteOperationsTestSuite) createTable(identifier table.Identifier, formatVersion int, spec iceberg.PartitionSpec) *table.Table {
+       meta, err := table.NewMetadata(s.tableSchema, &spec, table.UnsortedSortOrder,
+               s.location, iceberg.Properties{"format-version": strconv.Itoa(formatVersion)})
+       s.Require().NoError(err)
+
+       return table.New(
+               identifier,
+               meta,
+               s.getMetadataLoc(),
+               func(ctx context.Context) (iceio.IO, error) {
+                       return iceio.LocalFS{}, nil
+               },
+               &mockedCatalog{},
+       )
+}
+
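+// createTableWithData creates an unpartitioned format-v2 table, writes
+// numFiles parquet files from the suite's sample rows, and commits them
+// via AddFiles so each test starts from a populated snapshot.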
+func (s *WriteOperationsTestSuite) createTableWithData(identifier table.Identifier, numFiles int) (*table.Table, []string) {
+       tbl := s.createTable(identifier, 2, *iceberg.UnpartitionedSpec)
+       
+       files := make([]string, 0, numFiles)
+       fs := s.getFS(tbl)
+       
+       for i := 0; i < numFiles; i++ {
+               filePath := fmt.Sprintf("%s/data/test-%d.parquet", s.location, i)
+               s.writeParquet(fs, filePath, s.arrTable)
+               files = append(files, filePath)
+       }
+       
+       // Add files to table
+       tx := tbl.NewTransaction()
+       s.Require().NoError(tx.AddFiles(s.ctx, files, nil, false))
+       
+       committedTbl, err := tx.Commit(s.ctx)
+       s.Require().NoError(err)
+       
+       return committedTbl, files
+}
+
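+// getFS returns the table's IO asserted to iceio.WriteFileIO so tests can create files.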
+func (s *WriteOperationsTestSuite) getFS(tbl *table.Table) iceio.WriteFileIO {
+       fs, err := tbl.FS(s.ctx)
+       s.Require().NoError(err)
+       return fs.(iceio.WriteFileIO)
+}
+
+func (s *WriteOperationsTestSuite) TestRewriteFiles() {
+       s.Run("RewriteDataFiles", func() {
+               // Setup table with multiple small files
+               ident := table.Identifier{"default", "rewrite_test_table"}
+               tbl, originalFiles := s.createTableWithData(ident, 3)
+               
+               // Create new consolidated file
+               consolidatedPath := fmt.Sprintf("%s/data/consolidated.parquet", s.location)
+               
+               // Create larger dataset for consolidation
+               mem := memory.DefaultAllocator
+               largerTable, err := array.TableFromJSON(mem, s.arrSchema, []string{
+                       `[
+                               {"id": 1, "data": "foo", "ts": 1672531200000000},
+                               {"id": 2, "data": "bar", "ts": 1672534800000000},
+                               {"id": 3, "data": "baz", "ts": 1672538400000000},
+                               {"id": 4, "data": "qux", "ts": 1672542000000000},
+                               {"id": 5, "data": "quux", "ts": 1672545600000000}
+                       ]`,
+               })
+               s.Require().NoError(err)
+               defer largerTable.Release()
+               
+               fs := s.getFS(tbl)
+               s.writeParquet(fs, consolidatedPath, largerTable)
+               
+               // Rewrite files (replace multiple small files with one larger file)
+               tx := tbl.NewTransaction()
+               err = tx.ReplaceDataFiles(s.ctx, originalFiles, []string{consolidatedPath}, nil)
+               s.Require().NoError(err)
+               
+               newTbl, err := tx.Commit(s.ctx)
+               s.Require().NoError(err)
+               
+               // Verify file count reduction
+               snapshot := newTbl.CurrentSnapshot()
+               s.Require().NotNil(snapshot)
+               
+               // Verify data integrity by scanning
+               scan := newTbl.Scan()
+               results, err := scan.ToArrowTable(s.ctx)
+               s.Require().NoError(err)
+               defer results.Release()
+               
+               s.Equal(int64(5), results.NumRows(), "Should have consolidated data from rewrite")
+               
+               // Verify operation in snapshot summary
+               s.Equal(table.OpOverwrite, snapshot.Summary.Operation)
+       })
+       
+       s.Run("RewriteWithConflictDetection", func() {
+               // Test concurrent rewrite operations
+               ident := table.Identifier{"default", "rewrite_conflict_test"}
+               tbl, originalFiles := s.createTableWithData(ident, 2)
+               
+               // Start first transaction
+               tx1 := tbl.NewTransaction()
+               consolidatedPath1 := fmt.Sprintf("%s/data/consolidated1.parquet", s.location)
+               fs := s.getFS(tbl)
+               s.writeParquet(fs, consolidatedPath1, s.arrTable)
+               
+               // Start second transaction
+               tx2 := tbl.NewTransaction()
+               consolidatedPath2 := fmt.Sprintf("%s/data/consolidated2.parquet", s.location)
+               s.writeParquet(fs, consolidatedPath2, s.arrTable)
+               
+               // Both try to replace the same files
+               err1 := tx1.ReplaceDataFiles(s.ctx, originalFiles, []string{consolidatedPath1}, nil)
+               s.Require().NoError(err1)
+               
+               err2 := tx2.ReplaceDataFiles(s.ctx, originalFiles, []string{consolidatedPath2}, nil)
+               s.Require().NoError(err2)
+               
+               // First should succeed
+               _, err1 = tx1.Commit(s.ctx)
+               s.Require().NoError(err1)
+               
+               // Under full conflict detection the second commit would fail, since it
+               // replaces files tx1 already removed; iceberg-go may not detect this
+               // yet, so tolerate either outcome and log what happened.
+               _, err2 = tx2.Commit(s.ctx)
+               if err2 != nil {
+                       s.T().Logf("Transaction conflict detected: %v", err2)
+               } else {
+                       s.T().Log("Transaction completed without conflict - conflict detection may not be fully implemented")
+               }
+       })
+}
+
+func (s *WriteOperationsTestSuite) TestOverwriteFiles() {
+       s.Run("OverwriteByPartition", func() {
+               // Note: Partition-specific operations require more complex setup.
+               // For now, demonstrate basic overwrite capability
+               ident := table.Identifier{"default", "overwrite_partition_test"}
+               tbl, originalFiles := s.createTableWithData(ident, 1)
+               
+               initialSnapshot := tbl.CurrentSnapshot()
+               
+               // Create new data to simulate partition overwrite
+               mem := memory.DefaultAllocator
+               newData, err := array.TableFromJSON(mem, s.arrSchema, []string{
+                       `[
+                               {"id": 10, "data": "new_foo", "ts": 1672531200000000},
+                               {"id": 20, "data": "new_bar", "ts": 1672534800000000}
+                       ]`,
+               })
+               s.Require().NoError(err)
+               defer newData.Release()
+               
+               // Write new file for overwrite
+               newFilePath := fmt.Sprintf("%s/data/new_partition_data.parquet", s.location)
+               fs := s.getFS(tbl)
+               s.writeParquet(fs, newFilePath, newData)
+               
+               // Overwrite with new data
+               tx := tbl.NewTransaction()
+               err = tx.ReplaceDataFiles(s.ctx, originalFiles, []string{newFilePath}, nil)
+               s.Require().NoError(err)
+               
+               newTbl, err := tx.Commit(s.ctx)
+               s.Require().NoError(err)
+               
+               // Verify new snapshot
+               newSnapshot := newTbl.CurrentSnapshot()
+               s.Require().NotNil(newSnapshot)
+               s.NotEqual(initialSnapshot.SnapshotID, newSnapshot.SnapshotID)
+       })
+       
+       s.Run("OverwriteWithFilter", func() {
+               // Test overwrite with row-level filters
+               ident := table.Identifier{"default", "overwrite_filter_test"}
+               tbl, originalFiles := s.createTableWithData(ident, 1)
+               
+               // Create replacement data
+               mem := memory.DefaultAllocator
+               replacementData, err := array.TableFromJSON(mem, s.arrSchema, []string{
+                       `[
+                               {"id": 1, "data": "updated_foo", "ts": 1672531200000000},
+                               {"id": 4, "data": "new_data", "ts": 1672549200000000}
+                       ]`,
+               })
+               s.Require().NoError(err)
+               defer replacementData.Release()
+               
+               // For now, this demonstrates file-level replacement
+               // True row-level filtering would require delete files
+               consolidatedPath := fmt.Sprintf("%s/data/replacement.parquet", s.location)
+               fs := s.getFS(tbl)
+               s.writeParquet(fs, consolidatedPath, replacementData)
+               
+               // For this test, we'll demonstrate overwrite by creating a new file
+               // and replacing specific known files rather than discovering them dynamically.
+               // In a real scenario, you'd use manifest information to determine which files to replace.
+
+               // Replace with filtered data - using the known original files from table creation
+               tx := tbl.NewTransaction()
+               err = tx.ReplaceDataFiles(s.ctx, originalFiles, []string{consolidatedPath}, nil)
+               s.Require().NoError(err)
+               
+               newTbl, err := tx.Commit(s.ctx)
+               s.Require().NoError(err)
+               
+               // Verify correct rows are present
+               scan := newTbl.Scan()
+               results, err := scan.ToArrowTable(s.ctx)
+               s.Require().NoError(err)
+               defer results.Release()
+               
+               s.Equal(int64(2), results.NumRows(), "Should have filtered replacement data")
+       })
+}
+
+func (s *WriteOperationsTestSuite) TestPositionDeletes() {
+       s.Run("WritePositionDeleteFiles", func() {
+               // Test writing position delete files
+               ident := table.Identifier{"default", "position_delete_test"}
+               tbl, originalFiles := s.createTableWithData(ident, 1)
+               
+               // Create position delete data that deletes row at position 1 (second row)
+               mem := memory.DefaultAllocator
+               
+               // Position delete schema per the Iceberg spec: file_path (string), pos (long)
+               posDeleteSchema := arrow.NewSchema([]arrow.Field{
+                       {Name: "file_path", Type: arrow.BinaryTypes.String, Nullable: false},
+                       {Name: "pos", Type: arrow.PrimitiveTypes.Int64, Nullable: false},
+               }, nil)
+               
+               // Create delete data for the first file, deleting position 1
+               deleteData, err := array.TableFromJSON(mem, posDeleteSchema, []string{
+                       fmt.Sprintf(`[{"file_path": "%s", "pos": 1}]`, originalFiles[0]),
+               })
+               s.Require().NoError(err)
+               defer deleteData.Release()
+               
+               // Write the position delete file
+               deleteFilePath := fmt.Sprintf("%s/deletes/pos_deletes.parquet", s.location)
+               fs := s.getFS(tbl)
+               s.writeParquet(fs, deleteFilePath, deleteData)
+               
+               // Create DataFile object for the delete file
+               deleteFileBuilder, err := iceberg.NewDataFileBuilder(
+                       *iceberg.UnpartitionedSpec,
+                       iceberg.EntryContentPosDeletes,
+                       deleteFilePath,
+                       iceberg.ParquetFile,
+                       nil, // no partition data for unpartitioned
+                       deleteData.NumRows(),
+                       1024, // approximate file size
+               )
+               s.Require().NoError(err)
+               
+               deleteFile := deleteFileBuilder.Build()
+               
+               // Verify the delete file was created with correct properties
+               s.Equal(iceberg.EntryContentPosDeletes, deleteFile.ContentType())
+               s.Equal(deleteFilePath, deleteFile.FilePath())
+               s.Equal(int64(1), deleteFile.Count()) // One delete entry
+               
+               s.T().Log("Successfully created position delete file")
+       })
+       
+       s.Run("ApplyPositionDeletes", func() {
+               // Test reading with position deletes applied
+               ident := table.Identifier{"default", "position_delete_apply_test"}
+               tbl, originalFiles := s.createTableWithData(ident, 1)
+               
+               // The current iceberg-go scanner should be able to read and apply position
+               // deletes when they are properly associated with data files in manifests.
+               
+               // For now, we'll test the delete file reading functionality
+               mem := memory.DefaultAllocator
+               // Same position delete schema as above; pos is a long per the Iceberg spec.
+               posDeleteSchema := arrow.NewSchema([]arrow.Field{
+                       {Name: "file_path", Type: arrow.BinaryTypes.String, Nullable: false},
+                       {Name: "pos", Type: arrow.PrimitiveTypes.Int64, Nullable: false},
+               }, nil)
+               
+               // Create delete data
+               deleteData, err := array.TableFromJSON(mem, posDeleteSchema, []string{
+                       fmt.Sprintf(`[
+                               {"file_path": "%s", "pos": 0},
+                               {"file_path": "%s", "pos": 2}
+                       ]`, originalFiles[0], originalFiles[0]),
+               })
+               s.Require().NoError(err)
+               defer deleteData.Release()
+               
+               // Write the position delete file
+               deleteFilePath := fmt.Sprintf("%s/deletes/pos_deletes_apply.parquet", s.location)
+               fs := s.getFS(tbl)
+               s.writeParquet(fs, deleteFilePath, deleteData)
+               
+               // Create DataFile object for the delete file
+               deleteFileBuilder, err := iceberg.NewDataFileBuilder(
+                       *iceberg.UnpartitionedSpec,
+                       iceberg.EntryContentPosDeletes,
+                       deleteFilePath,
+                       iceberg.ParquetFile,
+                       nil,
+                       deleteData.NumRows(),
+                       1024,
+               )
+               s.Require().NoError(err)
+               
+               deleteFile := deleteFileBuilder.Build()
+               
+               // Test that we can read the delete file and verify its structure.
+               // This demonstrates the delete file format is correct
+               s.Equal(int64(2), deleteFile.Count()) // Two delete entries
+               s.Equal(iceberg.EntryContentPosDeletes, deleteFile.ContentType())
+               
+               // NOTE: Full integration with table scanning would require updating
+               // manifests to include the delete files, which is a more complex operation
+               // that would need transaction-level support for delete file management.
+               
+               s.T().Log("Position delete file structure verified - integration with table scanning requires manifest updates")

Review Comment:
   IIUC this is the end of the test .. do you mean to skip this test
   
   ```
   s.Run("ApplyPositionDeletes", func() {
   ```
   ?
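   
   If the intent is to leave this until manifests can carry delete files, an explicit skip would make that clearer to readers and to CI. A minimal sketch (the skip message wording is just a suggestion):
   
   ```go
   s.Run("ApplyPositionDeletes", func() {
           // Skip until transactions can register delete files in manifests.
           s.T().Skip("applying position deletes requires manifest-level delete file support")
   })
   ```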
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

