y0psolo commented on code in PR #62:
URL: https://github.com/apache/iceberg-rust/pull/62#discussion_r1349568117


##########
crates/iceberg/src/spec/table_metadata.rs:
##########
@@ -93,22 +112,291 @@ pub struct TableMetadata {
     /// previous metadata file location should be added to the list.
     /// Tables can be configured to remove oldest metadata log entries and
     /// keep a fixed-size log of the most recent entries after a commit.
+    #[builder(default)]
     metadata_log: Vec<MetadataLog>,
 
     /// A list of sort orders, stored as full sort order objects.
+    #[builder(default)]
     sort_orders: HashMap<i64, SortOrder>,
     /// Default sort order id of the table. Note that this could be used by
     /// writers, but is not used when reading because reads use the specs
     /// stored in manifest files.
+    #[builder(default = "DEFAULT_SORT_ORDER_ID")]
     default_sort_order_id: i64,
     ///A map of snapshot references. The map keys are the unique snapshot reference
     /// names in the table, and the map values are snapshot reference objects.
     /// There is always a main branch reference pointing to the current-snapshot-id
     /// even if the refs map is null.
+    #[builder(default)]
     refs: HashMap<String, SnapshotReference>,
 }
 
+// We define a From implementation converting the builder's UninitializedFieldError into an Iceberg Error
+impl From<UninitializedFieldError> for Error {
+    fn from(ufe: UninitializedFieldError) -> Error {
+        Error::new(ErrorKind::DataInvalid, ufe.to_string())
+    }
+}
+
+impl TableMetadataBuilder {
+    /// Initialize a TableMetadata with a TableCreation struct.
+    /// The Schema, SortOrder and PartitionSpec will be set as current.
+    pub fn with_table_creation(&mut self, tc: TableCreation) -> &mut Self {
+        self.with_location(tc.location)
+            .with_properties(tc.properties)
+            .with_sort_order(tc.sort_order, true)
+            .with_schema(tc.schema, true);
+
+        if let Some(partition_spec) = tc.partition_spec {
+            self.with_partition_spec(partition_spec, true);
+        }
+        self
+    }
+
+    /// Add a schema to the TableMetadata
+    /// schema : Schema to be added or replaced
+    /// current : True if the schema is the current one
+    pub fn with_schema(&mut self, schema: Schema, current: bool) -> &mut Self {
+        if current {
+            self.current_schema_id = Some(schema.schema_id());
+            self.last_column_id = Some(schema.highest_field_id());
+        }
+        if let Some(map) = self.schemas.as_mut() {
+            map.insert(schema.schema_id(), Arc::new(schema));
+        } else {
+            self.schemas = Some(HashMap::from([(schema.schema_id(), Arc::new(schema))]));
+        };
+        self
+    }
+
+    /// Add a partition_spec to the TableMetadata and update the last_partition_id accordingly
+    /// partition_spec : PartitionSpec to be added or replaced
+    /// default: True if this PartitionSpec is the default one
+    pub fn with_partition_spec(
+        &mut self,
+        partition_spec: PartitionSpec,
+        default: bool,
+    ) -> &mut Self {
+        if default {
+            self.default_spec_id = Some(partition_spec.spec_id);
+        }
+        let max_id = partition_spec
+            .fields
+            .iter()
+            .map(|field| field.field_id)
+            .max();
+        if max_id > self.last_partition_id {
+            self.last_partition_id = max_id;
+        }
+        if let Some(map) = self.partition_specs.as_mut() {
+            map.insert(partition_spec.spec_id, partition_spec);
+        } else {
+            self.partition_specs = Some(HashMap::from([(partition_spec.spec_id, partition_spec)]));
+        };
+        self
+    }
+
+    /// Add a snapshot to the TableMetadata and update last_sequence_number
+    /// snapshot : Snapshot to be added or replaced
+    /// current : True if the snapshot is the current one
+    pub fn with_snapshot(&mut self, snapshot: Snapshot, current: bool) -> &mut Self {
+        if current {
+            self.current_snapshot_id = Some(Some(snapshot.snapshot_id()))
+        }
+        if Some(snapshot.sequence_number()) > self.last_sequence_number {
+            self.last_sequence_number = Some(snapshot.sequence_number())
+        }
+        if let Some(Some(map)) = self.snapshots.as_mut() {
+            map.insert(snapshot.snapshot_id(), Arc::new(snapshot));
+        } else {
+            self.snapshots = Some(Some(HashMap::from([(
+                snapshot.snapshot_id(),
+                Arc::new(snapshot),
+            )])));
+        };
+
+        self
+    }
+
+    /// Add a sort_order to the TableMetadata
+    /// sort_order : SortOrder to be added or replaced
+    /// default: True if this SortOrder is the default one
+    pub fn with_sort_order(&mut self, sort_order: SortOrder, default: bool) -> &mut Self {
+        if default {
+            self.default_sort_order_id = Some(sort_order.order_id)
+        }
+        if let Some(map) = self.sort_orders.as_mut() {
+            map.insert(sort_order.order_id, sort_order);
+        } else {
+            self.sort_orders = Some(HashMap::from([(sort_order.order_id, sort_order)]));
+        };
+        self
+    }
+
+    /// Add a snapshot_reference to the TableMetadata
+    /// key_ref : reference id of the snapshot
+    /// snapshot_ref : SnapshotReference to add or update
+    pub fn with_ref(&mut self, key_ref: String, snapshot_ref: SnapshotReference) -> &mut Self {
+        if let Some(map) = self.refs.as_mut() {
+            map.insert(key_ref, snapshot_ref);
+        } else {
+            self.refs = Some(HashMap::from([(key_ref, snapshot_ref)]));
+        };
+        self
+    }
+
+    /// Check if the default key exists in the map.
+    /// Detect incoherent state and return an Error if:
+    /// - the map is not defined but the key exists
+    /// - the default key exists but there is no map,
+    ///     except if the key is a default value
+    /// Params :
+    /// - key : the key to look for in the map
+    /// - map : the map to scan
+    /// - default : default value for the key if any
+    /// - field : map name for throwing a better error
+    fn check_id_in_map<K: std::hash::Hash + Eq + std::fmt::Display + Copy, T>(

Review Comment:
   If we restrict construction to only the given list of setters below, we should not need this kind of validation, since every value will be computed inside the builder and the fields should therefore stay coherent.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

