[
https://issues.apache.org/jira/browse/HADOOP-19236?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=18017355#comment-18017355
]
ASF GitHub Bot commented on HADOOP-19236:
-----------------------------------------
slfan1989 commented on code in PR #7545:
URL: https://github.com/apache/hadoop/pull/7545#discussion_r2312903248
##########
hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/TestDirectoryStorage.java:
##########
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.tosfs.object;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.tosfs.TestEnv;
+import org.apache.hadoop.fs.tosfs.conf.TosKeys;
+import org.apache.hadoop.fs.tosfs.util.CommonUtils;
+import org.apache.hadoop.fs.tosfs.util.TestUtility;
+import org.apache.hadoop.fs.tosfs.util.UUIDUtils;
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+
+import static org.apache.hadoop.fs.tosfs.util.TestUtility.scheme;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+public class TestDirectoryStorage {
+ private final ObjectStorage storage;
+
+ public TestDirectoryStorage() {
+ Configuration conf = new Configuration();
+ storage =
+ ObjectStorageFactory.createWithPrefix(String.format("%s-%s/", scheme(), UUIDUtils.random()),
+ scheme(), TestUtility.bucket(), conf);
+ }
+
+ @BeforeAll
+ public static void before() {
+ assumeTrue(TestEnv.checkTestEnabled());
+ }
+
+ @AfterEach
+ public void tearDown() {
+ CommonUtils.runQuietly(() -> storage.deleteAll(""));
+ for (MultipartUpload upload : storage.listUploads("")) {
+ storage.abortMultipartUpload(upload.key(), upload.uploadId());
+ }
+ }
+
+ @Test
+ public void testListEmptyDir() {
+ String key = "testListEmptyDir/";
+ mkdir(key);
+ assertNotNull(directoryStorage().head(key));
+
+ assertFalse(directoryStorage().listDir(key, false).iterator().hasNext());
+ assertFalse(directoryStorage().listDir(key, true).iterator().hasNext());
+ assertTrue(directoryStorage().isEmptyDir(key));
+ }
+
+ @Test
+ public void testListNonExistDir() {
+ String key = "testListNonExistDir/";
+ assertNull(directoryStorage().head(key));
+
+ assertFalse(directoryStorage().listDir(key, false).iterator().hasNext());
+ assertFalse(directoryStorage().listDir(key, true).iterator().hasNext());
+ assertTrue(directoryStorage().isEmptyDir(key));
+ }
+
+ @Test
+ public void testRecursiveList() {
+ String root = "root/";
+ String file1 = "root/file1";
+ String file2 = "root/afile2";
+ String dir1 = "root/dir1/";
+ String file3 = "root/dir1/file3";
+
+ mkdir(root);
+ mkdir(dir1);
+ touchFile(file1, TestUtility.rand(8));
+ touchFile(file2, TestUtility.rand(8));
+ touchFile(file3, TestUtility.rand(8));
+
+ Assertions.assertThat(directoryStorage().listDir(root, false))
+ .hasSize(3)
+ .extracting(ObjectInfo::key)
+ .contains(dir1, file1, file2);
+
+ Assertions.assertThat(directoryStorage().listDir(root, true))
+ .hasSize(4)
+ .extracting(ObjectInfo::key)
+ .contains(dir1, file1, file2, file3);
+ }
+
+ @Test
+ public void testRecursiveListWithSmallBatch() {
+ Configuration conf = new Configuration(directoryStorage().conf());
+ conf.setInt(TosKeys.FS_TOS_LIST_OBJECTS_COUNT, 5);
+ directoryStorage().initialize(conf, directoryStorage().bucket().name());
+
+ String root = "root/";
+ mkdir(root);
+
+ // Create 2 files starting with 'a', 2 sub dirs starting with 'b', and 2 files starting with 'c'.
+ for (int i = 1; i <= 2; i++) {
+ touchFile("root/a-file-" + i, TestUtility.rand(8));
+ mkdir("root/b-dir-" + i + "/");
+ touchFile("root/c-file-" + i, TestUtility.rand(8));
+ }
+
+ // Create two files under each sub dir.
+ for (int j = 1; j <= 2; j++) {
+ touchFile(String.format("root/b-dir-%d/file1", j), TestUtility.rand(8));
+ touchFile(String.format("root/b-dir-%d/file2", j), TestUtility.rand(8));
+ }
+
+ Assertions.assertThat(directoryStorage().listDir(root, false))
+ .hasSize(6)
+ .extracting(ObjectInfo::key)
+ .contains(
+ "root/a-file-1", "root/a-file-2",
+ "root/b-dir-1/", "root/b-dir-2/",
+ "root/c-file-1", "root/c-file-2");
+
+ Assertions.assertThat(directoryStorage().listDir(root, true))
Review Comment:
Similarly, static imports should be used.
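
To make the suggestion concrete, here is a minimal sketch of what the change could look like inside TestDirectoryStorage, reusing the non-recursive listing assertion already shown in the diff above:

import static org.assertj.core.api.Assertions.assertThat;

// With the static import in place, the assertion no longer needs the Assertions class prefix:
assertThat(directoryStorage().listDir(root, false))
    .hasSize(6)
    .extracting(ObjectInfo::key)
    .contains(
        "root/a-file-1", "root/a-file-2",
        "root/b-dir-1/", "root/b-dir-2/",
        "root/c-file-1", "root/c-file-2");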
> Integration of Volcano Engine TOS in Hadoop.
> --------------------------------------------
>
> Key: HADOOP-19236
> URL: https://issues.apache.org/jira/browse/HADOOP-19236
> Project: Hadoop Common
> Issue Type: New Feature
> Components: fs, tools
> Affects Versions: 3.4.0
> Reporter: Jinglun
> Assignee: Zheng Hu
> Priority: Major
> Labels: pull-request-available
> Attachments: Integration of Volcano Engine TOS in Hadoop.pdf
>
>
> Volcano Engine is a fast-growing cloud vendor launched by ByteDance, and TOS
> is the object storage service of Volcano Engine. A common pattern is to store
> data in TOS and run Hadoop/Spark/Flink applications against it. However, there
> is no native support for TOS in Hadoop, so it is not easy for users to build
> their big data systems on top of TOS.
>
> This work aims to integrate TOS with Hadoop so that users can run their
> applications on TOS. Users only need to add some simple configuration, and
> their applications can then read and write TOS without any code change. This
> work is similar to the existing AWS S3, Azure Blob, Aliyun OSS, Tencent COS
> and Huawei Cloud Object Storage integrations in Hadoop.
>
> Please see the attached document "Integration of Volcano Engine TOS in
> Hadoop" for more details.
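
As a rough sketch of the "simple configuration" mentioned above, the standard Hadoop pattern is to map the tos:// scheme to its FileSystem implementation and point it at the service endpoint; the implementation class name, property keys, and endpoint below are illustrative placeholders, not the module's confirmed values:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TosQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.<scheme>.impl is the usual way to register a FileSystem; the class name here is a placeholder.
    conf.set("fs.tos.impl", "org.apache.hadoop.fs.tosfs.TosFileSystem");
    // Endpoint and credential keys are likewise placeholders for illustration.
    conf.set("fs.tos.endpoint", "https://tos-cn-beijing.volces.com");

    // Existing Hadoop code can then read and write tos:// paths without modification.
    try (FileSystem fs = FileSystem.get(URI.create("tos://my-bucket/"), conf)) {
      for (FileStatus status : fs.listStatus(new Path("tos://my-bucket/data/"))) {
        System.out.println(status.getPath());
      }
    }
  }
}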