goiri commented on a change in pull request #2981:
URL: https://github.com/apache/hadoop/pull/2981#discussion_r627643299
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
##########
@@ -1035,6 +1054,65 @@ private boolean updateQuota(String mount, long nsQuota,
long ssQuota)
.updateMountTableEntry(updateRequest);
return updateResponse.getStatus();
}
+
+ /**
+ * initViewFsToMountTable.
+ * @param clusterName The specified cluster to initialize.
+ * @return If the quota was updated.
+ * @throws IOException Error adding the mount point.
+ */
+ public boolean initViewFsToMountTable(String clusterName)
+ throws IOException {
+ // fs.viewfs.mounttable.ClusterX.link./data
+ final String mountTablePrefix =
+ Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." +
+ Constants.CONFIG_VIEWFS_LINK + "./";
+ Map<String, String> viewFsMap = getConf().getValByRegex(mountTablePrefix);
+ if (viewFsMap.isEmpty()) {
+ System.out.println("There is no ViewFs mapping to initialize.");
+ return true;
+ }
+ for (Entry<String, String> entry : viewFsMap.entrySet()) {
+ Path path = new Path(entry.getValue());
+ DestinationOrder order = DestinationOrder.HASH;
+ String[] mount = entry.getKey().split(
+ clusterName + "." + Constants.CONFIG_VIEWFS_LINK + ".");
+ if (mount.length < 2) {
+ System.out.println("Added Mount Point failed " + entry.getKey());
+ continue;
+ }
+ String[] nss = new String[]{path.toUri().getAuthority()};
+ boolean added = addMount(
+ mount[1], nss, path.toUri().getPath(), false,
Review comment:
Extract mount[1]
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
##########
@@ -700,6 +707,46 @@ public void testAddMountTableIfParentExist() throws
Exception {
}
}
+ @Test
+ public void testInitViewFsToMountTable() throws Exception {
+ // re-set system out for testing
+ System.setOut(new PrintStream(out));
+ stateStore.loadCache(MountTableStoreImpl.class, true);
+ String nnAddress =
cluster.getRandomNamenode().getNamenode().getHostAndPort();
+
+ String src = "/data";
+ Path destPath = new Path("hdfs://" + nnAddress + "/data");
+ String user = "user1";
+ String group = "group1";
+ String clusterName = "ClusterX";
+
+ // 0.mkdir destPath
+ hdfs.mkdirs(destPath);
+ // 1.set owner
+ hdfs.setOwner(destPath, user, group);
+ // 2.set viewFs mapping
+ admin.getConf().set("fs.viewfs.mounttable.ClusterX.link." + src,
destPath.toString());
+ // 3.run initialization
+ String[] argv = new String[]{"-initViewFsToMountTable", clusterName};
+ assertEquals(0, ToolRunner.run(admin, argv));
+ // 4.gets the mount point entries
+ stateStore.loadCache(MountTableStoreImpl.class, true);
+ GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+ .newInstance("/");
+ GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+ .getMountTableEntries(getRequest);
+ List<MountTable> mountTables = getResponse.getEntries();
+ // 5.check
+ assertEquals(1, mountTables.size());
+ assertEquals(user, mountTables.get(0).getOwnerName());
Review comment:
Extract mountTables.get(0) after the assert for size.
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
##########
@@ -78,6 +81,8 @@
private static RouterClient client;
private static Router router;
+ private static DistributedFileSystem hdfs;
Review comment:
I don't think we need any HDFS-specific feature here, do we? Let's do: fs =
cluster.getCluster().getFileSystem();
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
##########
@@ -1035,6 +1054,65 @@ private boolean updateQuota(String mount, long nsQuota,
long ssQuota)
.updateMountTableEntry(updateRequest);
return updateResponse.getStatus();
}
+
+ /**
+ * initViewFsToMountTable.
+ * @param clusterName The specified cluster to initialize.
+ * @return If the quota was updated.
+ * @throws IOException Error adding the mount point.
+ */
+ public boolean initViewFsToMountTable(String clusterName)
+ throws IOException {
+ // fs.viewfs.mounttable.ClusterX.link./data
+ final String mountTablePrefix =
+ Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." +
+ Constants.CONFIG_VIEWFS_LINK + "./";
+ Map<String, String> viewFsMap = getConf().getValByRegex(mountTablePrefix);
+ if (viewFsMap.isEmpty()) {
+ System.out.println("There is no ViewFs mapping to initialize.");
+ return true;
+ }
+ for (Entry<String, String> entry : viewFsMap.entrySet()) {
+ Path path = new Path(entry.getValue());
+ DestinationOrder order = DestinationOrder.HASH;
+ String[] mount = entry.getKey().split(
+ clusterName + "." + Constants.CONFIG_VIEWFS_LINK + ".");
+ if (mount.length < 2) {
+ System.out.println("Added Mount Point failed " + entry.getKey());
Review comment:
Extract entry.getKey()
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
##########
@@ -1035,6 +1054,65 @@ private boolean updateQuota(String mount, long nsQuota,
long ssQuota)
.updateMountTableEntry(updateRequest);
return updateResponse.getStatus();
}
+
+ /**
+ * initViewFsToMountTable.
+ * @param clusterName The specified cluster to initialize.
+ * @return If the quota was updated.
+ * @throws IOException Error adding the mount point.
+ */
+ public boolean initViewFsToMountTable(String clusterName)
+ throws IOException {
+ // fs.viewfs.mounttable.ClusterX.link./data
+ final String mountTablePrefix =
+ Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." +
+ Constants.CONFIG_VIEWFS_LINK + "./";
+ Map<String, String> viewFsMap = getConf().getValByRegex(mountTablePrefix);
+ if (viewFsMap.isEmpty()) {
+ System.out.println("There is no ViewFs mapping to initialize.");
+ return true;
+ }
+ for (Entry<String, String> entry : viewFsMap.entrySet()) {
+ Path path = new Path(entry.getValue());
+ DestinationOrder order = DestinationOrder.HASH;
+ String[] mount = entry.getKey().split(
Review comment:
Would it be cleaner to use a regex or something better than splitting
blindly?
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
##########
@@ -700,6 +707,46 @@ public void testAddMountTableIfParentExist() throws
Exception {
}
}
+ @Test
+ public void testInitViewFsToMountTable() throws Exception {
+ // re-set system out for testing
+ System.setOut(new PrintStream(out));
+ stateStore.loadCache(MountTableStoreImpl.class, true);
+ String nnAddress =
cluster.getRandomNamenode().getNamenode().getHostAndPort();
+
+ String src = "/data";
+ Path destPath = new Path("hdfs://" + nnAddress + "/data");
+ String user = "user1";
+ String group = "group1";
+ String clusterName = "ClusterX";
+
+ // 0.mkdir destPath
+ hdfs.mkdirs(destPath);
+ // 1.set owner
+ hdfs.setOwner(destPath, user, group);
+ // 2.set viewFs mapping
+ admin.getConf().set("fs.viewfs.mounttable.ClusterX.link." + src,
destPath.toString());
Review comment:
Can we add some documentation to HDFSRouterFederation.md?
With pointers to viewfs, etc.
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
##########
@@ -1035,6 +1054,65 @@ private boolean updateQuota(String mount, long nsQuota,
long ssQuota)
.updateMountTableEntry(updateRequest);
return updateResponse.getStatus();
}
+
+ /**
+ * initViewFsToMountTable.
+ * @param clusterName The specified cluster to initialize.
+ * @return If the quota was updated.
+ * @throws IOException Error adding the mount point.
+ */
+ public boolean initViewFsToMountTable(String clusterName)
+ throws IOException {
+ // fs.viewfs.mounttable.ClusterX.link./data
+ final String mountTablePrefix =
+ Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." +
+ Constants.CONFIG_VIEWFS_LINK + "./";
+ Map<String, String> viewFsMap = getConf().getValByRegex(mountTablePrefix);
+ if (viewFsMap.isEmpty()) {
+ System.out.println("There is no ViewFs mapping to initialize.");
+ return true;
+ }
+ for (Entry<String, String> entry : viewFsMap.entrySet()) {
+ Path path = new Path(entry.getValue());
+ DestinationOrder order = DestinationOrder.HASH;
+ String[] mount = entry.getKey().split(
+ clusterName + "." + Constants.CONFIG_VIEWFS_LINK + ".");
+ if (mount.length < 2) {
+ System.out.println("Added Mount Point failed " + entry.getKey());
+ continue;
+ }
+ String[] nss = new String[]{path.toUri().getAuthority()};
+ boolean added = addMount(
+ mount[1], nss, path.toUri().getPath(), false,
+ false, order, getACLEntityFormHdfsPath(path));
+ if (added) {
+ System.out.println("added mount point " + mount[1]);
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Returns ACLEntity according to a HDFS pat.
+ * @param path A path of HDFS.
+ */
+ public ACLEntity getACLEntityFormHdfsPath(Path path){
Review comment:
static
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
##########
@@ -1035,6 +1054,65 @@ private boolean updateQuota(String mount, long nsQuota,
long ssQuota)
.updateMountTableEntry(updateRequest);
return updateResponse.getStatus();
}
+
+ /**
+ * initViewFsToMountTable.
+ * @param clusterName The specified cluster to initialize.
+ * @return If the quota was updated.
+ * @throws IOException Error adding the mount point.
+ */
+ public boolean initViewFsToMountTable(String clusterName)
+ throws IOException {
+ // fs.viewfs.mounttable.ClusterX.link./data
+ final String mountTablePrefix =
+ Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." +
+ Constants.CONFIG_VIEWFS_LINK + "./";
+ Map<String, String> viewFsMap = getConf().getValByRegex(mountTablePrefix);
+ if (viewFsMap.isEmpty()) {
+ System.out.println("There is no ViewFs mapping to initialize.");
+ return true;
+ }
+ for (Entry<String, String> entry : viewFsMap.entrySet()) {
+ Path path = new Path(entry.getValue());
+ DestinationOrder order = DestinationOrder.HASH;
+ String[] mount = entry.getKey().split(
+ clusterName + "." + Constants.CONFIG_VIEWFS_LINK + ".");
+ if (mount.length < 2) {
+ System.out.println("Added Mount Point failed " + entry.getKey());
+ continue;
+ }
+ String[] nss = new String[]{path.toUri().getAuthority()};
+ boolean added = addMount(
+ mount[1], nss, path.toUri().getPath(), false,
+ false, order, getACLEntityFormHdfsPath(path));
+ if (added) {
+ System.out.println("added mount point " + mount[1]);
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Returns ACLEntity according to a HDFS pat.
+ * @param path A path of HDFS.
+ */
+ public ACLEntity getACLEntityFormHdfsPath(Path path){
+ String owner = null;
+ String group = null;
+ FsPermission mode = null;
+ try {
+ FileSystem fs = path.getFileSystem(getConf());
+ if (fs.exists(path)) {
+ FileStatus fileStatus = fs.getFileStatus(path);
+ owner = fileStatus.getOwner();
+ group = fileStatus.getGroup();
+ mode = fileStatus.getPermission();
+ }
+ } catch (IOException e) {
+ System.out.println("Exception encountered " + e);
Review comment:
Should this use System.err instead of System.out?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]