JanKaul commented on code in PR #53:
URL: https://github.com/apache/iceberg-rust/pull/53#discussion_r1329767239


##########
crates/iceberg/src/io.rs:
##########
@@ -0,0 +1,451 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! File io implementation.
+//!
+//! # How to build `FileIO`
+//!
+//! We provide a `FileIOBuilder` to build `FileIO` from scratch. For example:
+//! ```rust
+//! use iceberg::io::{FileIOBuilder, S3_REGION};
+//!
+//! let file_io = FileIOBuilder::new("s3")
+//!     .with_prop(S3_REGION, "us-east-1")
+//!     .build()
+//!     .unwrap();
+//! ```
+//!
+//! # How to use `FileIO`
+//!
+//! Currently `FileIO` provides simple methods for file operations:
+//!
+//! - `delete`: Delete file.
+//! - `is_exist`: Check if file exists.
+//! - `new_input`: Create input file for reading.
+//! - `new_output`: Create output file for writing.
+
+use std::{collections::HashMap, sync::Arc};
+
+use crate::{error::Result, Error, ErrorKind};
+use futures::{AsyncRead, AsyncSeek, AsyncWrite};
+use once_cell::sync::Lazy;
+use opendal::{Operator, Scheme};
+use url::Url;
+
/// Following are arguments for s3 file io.
/// S3 endpoint.
pub const S3_ENDPOINT: &str = "s3.endpoint";
/// S3 access key id.
pub const S3_ACCESS_KEY_ID: &str = "s3.access-key-id";
/// S3 secret access key.
pub const S3_SECRET_ACCESS_KEY: &str = "s3.secret-access-key";
/// S3 region.
pub const S3_REGION: &str = "s3.region";
+
+/// A mapping from iceberg s3 configuration key to [`opendal::Operator`] 
configuration key.
+static S3_CONFIG_MAPPING: Lazy<HashMap<&'static str, &'static str>> = 
Lazy::new(|| {
+    let mut m = HashMap::with_capacity(4);
+    m.insert(S3_ENDPOINT, "endpoint");
+    m.insert(S3_ACCESS_KEY_ID, "access_key_id");
+    m.insert(S3_SECRET_ACCESS_KEY, "secret_access_key");
+    m.insert(S3_REGION, "region");
+
+    m
+});
+
// Root path passed to the operator builder; consumers of this constant are
// not visible in this chunk — presumably used for the local-filesystem scheme.
const ROOT_PATH: &str = "/";
/// FileIO implementation, used to manipulate files in underlying storage.
///
/// # Note
///
/// All path passed to `FileIO` must be absolute path starting with scheme string used to construct `FileIO`.
/// For example, if you construct `FileIO` with `s3a` scheme, then all path passed to `FileIO` must start with `s3a://`.
#[derive(Clone, Debug)]
pub struct FileIO {
    // Shared storage backend; the `Arc` keeps `FileIO` cheap to clone.
    inner: Arc<Storage>,
}
+
/// Builder for [`FileIO`].
pub struct FileIOBuilder {
    /// This is used to infer scheme of operator.
    ///
    /// If this is `None`, then [`FileIOBuilder::build`](FileIOBuilder::build) will build a local file io.
    scheme_str: Option<String>,
    /// Arguments for operator (e.g. the `s3.*` keys declared above).
    props: HashMap<String, String>,
}
+
+impl FileIOBuilder {
+    /// Creates a new builder with scheme.
+    pub fn new(scheme_str: impl ToString) -> Self {
+        Self {
+            scheme_str: Some(scheme_str.to_string()),
+            props: HashMap::default(),
+        }
+    }
+
+    /// Creates a new builder for local file io.
+    pub fn new_local_file_io() -> Self {
+        Self {
+            scheme_str: None,
+            props: HashMap::default(),
+        }
+    }
+
+    /// Add argument for operator.
+    pub fn with_prop(mut self, key: impl ToString, value: impl ToString) -> 
Self {
+        self.props.insert(key.to_string(), value.to_string());
+        self
+    }
+
+    /// Add argument for operator.
+    pub fn with_props(
+        mut self,
+        args: impl IntoIterator<Item = (impl ToString, impl ToString)>,
+    ) -> Self {
+        self.props
+            .extend(args.into_iter().map(|e| (e.0.to_string(), 
e.1.to_string())));
+        self
+    }
+
+    /// Builds [`FileIO`].
+    pub fn build(self) -> Result<FileIO> {
+        let storage = Storage::build(self)?;
+        Ok(FileIO {
+            inner: Arc::new(storage),
+        })
+    }
+}
+
+impl FileIO {
+    /// Deletes file.
+    pub async fn delete(&self, path: impl AsRef<str>) -> Result<()> {
+        let (op, relative_path) = self.inner.create_operator(&path)?;
+        Ok(op.delete(relative_path).await?)
+    }
+
+    /// Check file exists.
+    pub async fn is_exist(&self, path: impl AsRef<str>) -> Result<bool> {
+        let (op, relative_path) = self.inner.create_operator(&path)?;
+        Ok(op.is_exist(relative_path).await?)
+    }
+
+    /// Creates input file.
+    pub fn new_input(&self, path: impl AsRef<str>) -> Result<InputFile> {
+        let (op, relative_path) = self.inner.create_operator(&path)?;
+        let path = path.as_ref().to_string();
+        let relative_path_pos = path.len() - relative_path.len();
+        Ok(InputFile {
+            op,
+            path,
+            relative_path_pos,
+        })
+    }
+
+    /// Creates output file.
+    pub fn new_output(&self, path: impl AsRef<str>) -> Result<OutputFile> {
+        let (op, relative_path) = self.inner.create_operator(&path)?;
+        let path = path.as_ref().to_string();
+        let relative_path_pos = path.len() - relative_path.len();
+        Ok(OutputFile {
+            op,
+            path,
+            relative_path_pos,
+        })
+    }
+}
+
/// Input file implementation.
#[derive(Debug)]
pub struct InputFile {
    // Operator used to access the underlying storage.
    op: Operator,
    // Absolute path of the file, including the scheme prefix.
    path: String,
    // Byte offset in `path` where the operator-relative part starts,
    // i.e. `&path[relative_path_pos..]` is the path passed to `op`.
    relative_path_pos: usize,
}
+
/// Input stream for reading.
///
/// Trait alias combining [`AsyncRead`] and [`AsyncSeek`]; any type
/// implementing both satisfies it (blanket impl presumably provided
/// elsewhere in this file — not visible in this chunk).
pub trait InputStream: AsyncRead + AsyncSeek {}

Review Comment:
   While the trait aliases make the implementation more consistent, I'm not 
sure whether they convey the same information when looking at the function 
signature in the documentation. The traits `AsyncRead` and `AsyncWrite` are 
often-used traits in the rust ecosystem, and users have an understanding of the 
concepts. 
   
   I remember that I was looking for an object store implementation and I was 
explicitly searching for `AsyncWrite` because the library I was using required 
a type that implements `AsyncWrite`. I'm not sure if that is obvious with `impl 
FileWrite`.
   
   Just something to think about.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org

Reply via email to