kgeisz commented on code in PR #7149:
URL: https://github.com/apache/hbase/pull/7149#discussion_r2317553328


##########
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesRegionProcedure.java:
##########
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.Optional;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
+import org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
+import org.apache.hadoop.hbase.regionserver.RefreshHFilesCallable;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+
[email protected]
+public class RefreshHFilesRegionProcedure extends Procedure<MasterProcedureEnv>
+  implements TableProcedureInterface,
+  RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv, ServerName> {
+  private static final Logger LOG = 
LoggerFactory.getLogger(RefreshHFilesRegionProcedure.class);
+  private RegionInfo region;
+  private ProcedureEvent<?> event;
+  private boolean dispatched;
+  private boolean succ;
+  private RetryCounter retryCounter;
+
+  public RefreshHFilesRegionProcedure() {
+  }
+
+  public RefreshHFilesRegionProcedure(RegionInfo region) {
+    this.region = region;
+  }
+
+  @Override
+  protected void deserializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    MasterProcedureProtos.RefreshHFilesRegionProcedureStateData data =
+      
serializer.deserialize(MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.class);
+    this.region = ProtobufUtil.toRegionInfo(data.getRegion());
+  }
+
+  @Override
+  protected void serializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.Builder 
builder =
+      MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.newBuilder();
+    builder.setRegion(ProtobufUtil.toRegionInfo(region));
+    serializer.serialize(builder.build());
+  }
+
+  @Override
+  protected boolean abort(MasterProcedureEnv env) {
+    return false;
+  }
+
+  @Override
+  protected void rollback(MasterProcedureEnv env) throws IOException, 
InterruptedException {
+    throw new UnsupportedOperationException();
+  }
+
+  private void setTimeoutForSuspend(MasterProcedureEnv env, String reason) {
+    if (retryCounter == null) {
+      retryCounter = 
ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
+    }
+    long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
+    LOG.warn("{} can not run currently because {}, wait {} ms to retry", this, 
reason, backoff);
+    setTimeout(Math.toIntExact(backoff));
+    setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
+    skipPersistence();
+  }
+
+  @Override
+  protected Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env)
+    throws ProcedureYieldException, ProcedureSuspendedException, 
InterruptedException {
+    if (dispatched) {
+      if (succ) {
+        return null;
+      }
+      dispatched = false;
+    }
+
+    RegionStates regionStates = env.getAssignmentManager().getRegionStates();
+    RegionStateNode regionNode = regionStates.getRegionStateNode(region);
+
+    if (regionNode.getProcedure() != null) {
+      setTimeoutForSuspend(env, String.format("region %s has a TRSP attached 
%s",
+        region.getRegionNameAsString(), regionNode.getProcedure()));
+      throw new ProcedureSuspendedException();
+    }
+
+    if (!regionNode.isInState(RegionState.State.OPEN)) {
+      LOG.info("State of region {} is not OPEN. Skip {} ...", region, this);
+      setTimeoutForSuspend(env, String.format("region state of %s is %s",
+        region.getRegionNameAsString(), regionNode.getState()));
+      return null;
+    }
+
+    ServerName targetServer = regionNode.getRegionLocation();
+    if (targetServer == null) {
+      setTimeoutForSuspend(env,
+        String.format("target server of region %s is null", 
region.getRegionNameAsString()));
+      throw new ProcedureSuspendedException();
+    }
+
+    try {
+      env.getRemoteDispatcher().addOperationToNode(targetServer, this);
+      dispatched = true;
+      event = new ProcedureEvent<>(this);
+      event.suspendIfNotReady(this);
+      throw new ProcedureSuspendedException();

Review Comment:
   Why do we throw a `ProcedureSuspendedException` here?



##########
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesRegionProcedure.java:
##########
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.Optional;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
+import org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
+import org.apache.hadoop.hbase.regionserver.RefreshHFilesCallable;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+
[email protected]
+public class RefreshHFilesRegionProcedure extends Procedure<MasterProcedureEnv>
+  implements TableProcedureInterface,
+  RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv, ServerName> {
+  private static final Logger LOG = 
LoggerFactory.getLogger(RefreshHFilesRegionProcedure.class);
+  private RegionInfo region;
+  private ProcedureEvent<?> event;
+  private boolean dispatched;
+  private boolean succ;
+  private RetryCounter retryCounter;
+
+  public RefreshHFilesRegionProcedure() {
+  }
+
+  public RefreshHFilesRegionProcedure(RegionInfo region) {
+    this.region = region;
+  }
+
+  @Override
+  protected void deserializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    MasterProcedureProtos.RefreshHFilesRegionProcedureStateData data =
+      
serializer.deserialize(MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.class);
+    this.region = ProtobufUtil.toRegionInfo(data.getRegion());
+  }
+
+  @Override
+  protected void serializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.Builder 
builder =
+      MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.newBuilder();
+    builder.setRegion(ProtobufUtil.toRegionInfo(region));
+    serializer.serialize(builder.build());
+  }
+
+  @Override
+  protected boolean abort(MasterProcedureEnv env) {
+    return false;
+  }
+
+  @Override
+  protected void rollback(MasterProcedureEnv env) throws IOException, 
InterruptedException {
+    throw new UnsupportedOperationException();
+  }
+
+  private void setTimeoutForSuspend(MasterProcedureEnv env, String reason) {
+    if (retryCounter == null) {
+      retryCounter = 
ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
+    }
+    long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
+    LOG.warn("{} can not run currently because {}, wait {} ms to retry", this, 
reason, backoff);
+    setTimeout(Math.toIntExact(backoff));
+    setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
+    skipPersistence();
+  }
+
+  @Override
+  protected Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env)
+    throws ProcedureYieldException, ProcedureSuspendedException, 
InterruptedException {
+    if (dispatched) {
+      if (succ) {
+        return null;
+      }
+      dispatched = false;
+    }
+
+    RegionStates regionStates = env.getAssignmentManager().getRegionStates();
+    RegionStateNode regionNode = regionStates.getRegionStateNode(region);
+
+    if (regionNode.getProcedure() != null) {
+      setTimeoutForSuspend(env, String.format("region %s has a TRSP attached 
%s",
+        region.getRegionNameAsString(), regionNode.getProcedure()));
+      throw new ProcedureSuspendedException();
+    }
+
+    if (!regionNode.isInState(RegionState.State.OPEN)) {
+      LOG.info("State of region {} is not OPEN. Skip {} ...", region, this);
+      setTimeoutForSuspend(env, String.format("region state of %s is %s",
+        region.getRegionNameAsString(), regionNode.getState()));
+      return null;
+    }
+
+    ServerName targetServer = regionNode.getRegionLocation();
+    if (targetServer == null) {
+      setTimeoutForSuspend(env,
+        String.format("target server of region %s is null", 
region.getRegionNameAsString()));
+      throw new ProcedureSuspendedException();
+    }
+
+    try {
+      env.getRemoteDispatcher().addOperationToNode(targetServer, this);
+      dispatched = true;
+      event = new ProcedureEvent<>(this);
+      event.suspendIfNotReady(this);
+      throw new ProcedureSuspendedException();
+    } catch (FailedRemoteDispatchException e) {
+      setTimeoutForSuspend(env, "Failed send request to " + targetServer);
+      throw new ProcedureSuspendedException();
+    }
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+    return TableOperationType.REFRESH_HFILES;
+  }
+
+  @Override
+  public TableName getTableName() {
+    return region.getTable();
+  }
+
+  @Override
+  public void remoteOperationFailed(MasterProcedureEnv env, 
RemoteProcedureException error) {
+    complete(env, error);
+  }
+
+  @Override
+  public void remoteOperationCompleted(MasterProcedureEnv env) {
+    complete(env, null);
+  }
+
+  @Override
+  public void remoteCallFailed(MasterProcedureEnv env, ServerName serverName, 
IOException e) {
+    complete(env, e);
+  }
+
+  private void complete(MasterProcedureEnv env, Throwable error) {
+    if (isFinished()) {
+      LOG.info("This procedure {} is already finished, skip the rest 
processes", this.getProcId());

Review Comment:
   nit
   ```suggestion
         LOG.info("This procedure {} is already finished. Skip the rest of the 
processes", this.getProcId());
   ```



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRefreshHFilesFromClient.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static 
org.apache.hadoop.hbase.client.AsyncConnectionConfiguration.START_LOG_ERRORS_AFTER_COUNT_KEY;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestRefreshHFilesFromClient {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRefreshHFilesFromClient.class);
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestRefreshHFilesFromClient.class);
+  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+  private static AsyncConnection asyncConn;
+  private static Admin admin;
+  private static Configuration conf;
+  private static final TableName TEST_TABLE = 
TableName.valueOf("testRefreshHFilesTable");
+  private static final TableName TEST_INVALID_TABLE =
+    TableName.valueOf("testRefreshHFilesInvalidTable");
+  private static final String TEST_NAMESPACE = "testRefreshHFilesNamespace";
+  private static final String TEST_INVALID_NAMESPACE = 
"testRefreshHFilesInvalidNamespace";
+  private static final String TEST_TABLE_IN_NAMESPACE = TEST_NAMESPACE + 
":testTableInNamespace";
+  private static final byte[] TEST_FAMILY = 
Bytes.toBytes("testRefreshHFilesCF1");
+
+  @Before
+  public void setUp() throws Exception {
+    conf = TEST_UTIL.getConfiguration();
+    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 60000);
+    conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 120000);
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
+    conf.setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0);
+    conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
+
+    // Start the test cluster
+    TEST_UTIL.startMiniCluster(1);
+    asyncConn = 
ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
+    admin = TEST_UTIL.getAdmin();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    Closeables.close(asyncConn, true);
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  private void createNamespace(String namespace) throws RuntimeException {
+    try {
+      final NamespaceDescriptor nsd = 
NamespaceDescriptor.create(namespace).build();
+      // Create the namespace if it doesn’t exist
+      if (
+        Arrays.stream(admin.listNamespaceDescriptors())
+          .noneMatch(ns -> ns.getName().equals(namespace))
+      ) {
+        admin.createNamespace(nsd);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void deleteNamespace(String namespace) {
+    try {
+      // List table in namespace
+      TableName[] tables = admin.listTableNamesByNamespace(namespace);
+      for (TableName t : tables) {
+        TEST_UTIL.deleteTableIfAny(t);
+      }
+      // Now delete the namespace
+      admin.deleteNamespace(namespace);
+    } catch (Exception e) {
+      LOG.debug(
+        "Unable to delete namespace " + namespace + " post test execution. 
This isn't a failure");
+    }
+  }
+
+  @Test
+  public void testRefreshHFilesForTable() throws Exception {
+    try {
+      // Create table in default namespace
+      TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY);
+      TEST_UTIL.waitTableAvailable(TEST_TABLE);
+
+      // RefreshHFiles for table
+      Long procId = admin.refreshHFiles(TEST_TABLE);
+      assertTrue(procId >= 0);
+    } catch (Exception e) {
+      Assert.fail("RefreshHFilesForTable Should Not Throw Exception: " + e);
+      throw new RuntimeException(e);
+    } finally {
+      // Delete table name post test execution
+      TEST_UTIL.deleteTableIfAny(TEST_TABLE);
+    }
+  }
+
+  // Not creating table hence refresh should throw exception
+  @Test(expected = TableNotFoundException.class)
+  public void testRefreshHFilesForInvalidTable() throws Exception {
+    // RefreshHFiles for table

Review Comment:
   This comment can probably be removed since the HFiles don't actually get 
refreshed.



##########
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesRegionProcedure.java:
##########
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.Optional;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
+import org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
+import org.apache.hadoop.hbase.regionserver.RefreshHFilesCallable;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+

Review Comment:
   Can you please add some documentation about this class? It may be useful. I 
see some classes in this directory don't have documentation, but others do.



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRefreshHFilesFromClient.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static 
org.apache.hadoop.hbase.client.AsyncConnectionConfiguration.START_LOG_ERRORS_AFTER_COUNT_KEY;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestRefreshHFilesFromClient {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRefreshHFilesFromClient.class);
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestRefreshHFilesFromClient.class);
+  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+  private static AsyncConnection asyncConn;
+  private static Admin admin;
+  private static Configuration conf;
+  private static final TableName TEST_TABLE = 
TableName.valueOf("testRefreshHFilesTable");
+  private static final TableName TEST_INVALID_TABLE =
+    TableName.valueOf("testRefreshHFilesInvalidTable");
+  private static final String TEST_NAMESPACE = "testRefreshHFilesNamespace";
+  private static final String TEST_INVALID_NAMESPACE = 
"testRefreshHFilesInvalidNamespace";
+  private static final String TEST_TABLE_IN_NAMESPACE = TEST_NAMESPACE + 
":testTableInNamespace";
+  private static final byte[] TEST_FAMILY = 
Bytes.toBytes("testRefreshHFilesCF1");
+
+  @Before
+  public void setUp() throws Exception {
+    conf = TEST_UTIL.getConfiguration();
+    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 60000);
+    conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 120000);
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
+    conf.setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0);
+    conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
+
+    // Start the test cluster
+    TEST_UTIL.startMiniCluster(1);
+    asyncConn = 
ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
+    admin = TEST_UTIL.getAdmin();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    Closeables.close(asyncConn, true);
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  private void createNamespace(String namespace) throws RuntimeException {
+    try {
+      final NamespaceDescriptor nsd = 
NamespaceDescriptor.create(namespace).build();
+      // Create the namespace if it doesn’t exist
+      if (
+        Arrays.stream(admin.listNamespaceDescriptors())
+          .noneMatch(ns -> ns.getName().equals(namespace))
+      ) {
+        admin.createNamespace(nsd);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void deleteNamespace(String namespace) {
+    try {
+      // List table in namespace
+      TableName[] tables = admin.listTableNamesByNamespace(namespace);
+      for (TableName t : tables) {
+        TEST_UTIL.deleteTableIfAny(t);
+      }
+      // Now delete the namespace
+      admin.deleteNamespace(namespace);
+    } catch (Exception e) {
+      LOG.debug(
+        "Unable to delete namespace " + namespace + " post test execution. 
This isn't a failure");
+    }
+  }
+
+  @Test
+  public void testRefreshHFilesForTable() throws Exception {
+    try {
+      // Create table in default namespace
+      TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY);
+      TEST_UTIL.waitTableAvailable(TEST_TABLE);
+
+      // RefreshHFiles for table
+      Long procId = admin.refreshHFiles(TEST_TABLE);
+      assertTrue(procId >= 0);
+    } catch (Exception e) {
+      Assert.fail("RefreshHFilesForTable Should Not Throw Exception: " + e);
+      throw new RuntimeException(e);
+    } finally {
+      // Delete table name post test execution
+      TEST_UTIL.deleteTableIfAny(TEST_TABLE);
+    }
+  }
+
+  // Not creating table hence refresh should throw exception
+  @Test(expected = TableNotFoundException.class)
+  public void testRefreshHFilesForInvalidTable() throws Exception {
+    // RefreshHFiles for table
+    admin.refreshHFiles(TEST_INVALID_TABLE);
+  }
+
+  @Test
+  public void testRefreshHFilesForNamespace() throws Exception {
+    try {
+      createNamespace(TEST_NAMESPACE);
+
+      // Create table under test namespace
+      TEST_UTIL.createTable(TableName.valueOf(TEST_TABLE_IN_NAMESPACE), 
TEST_FAMILY);
+      TEST_UTIL.waitTableAvailable(TableName.valueOf(TEST_TABLE_IN_NAMESPACE));
+
+      // RefreshHFiles for namespace
+      Long procId = admin.refreshHFiles(TEST_NAMESPACE);
+      assertTrue(procId >= 0);
+
+    } catch (Exception e) {
+      Assert.fail("RefreshHFilesForAllNamespace Should Not Throw Exception: " 
+ e);
+      throw new RuntimeException(e);
+    } finally {
+      // Delete namespace post test execution
+      deleteNamespace(TEST_NAMESPACE);
+    }
+  }
+
+  @Test(expected = NamespaceNotFoundException.class)
+  public void testRefreshHFilesForInvalidNamespace() throws Exception {
+    // RefreshHFiles for namespace

Review Comment:
   This comment probably isn't needed



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRefreshHFilesFromClient.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static 
org.apache.hadoop.hbase.client.AsyncConnectionConfiguration.START_LOG_ERRORS_AFTER_COUNT_KEY;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestRefreshHFilesFromClient {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRefreshHFilesFromClient.class);
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestRefreshHFilesFromClient.class);
+  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+  private static AsyncConnection asyncConn;
+  private static Admin admin;
+  private static Configuration conf;
+  private static final TableName TEST_TABLE = 
TableName.valueOf("testRefreshHFilesTable");
+  private static final TableName TEST_INVALID_TABLE =
+    TableName.valueOf("testRefreshHFilesInvalidTable");
+  private static final String TEST_NAMESPACE = "testRefreshHFilesNamespace";
+  private static final String TEST_INVALID_NAMESPACE = 
"testRefreshHFilesInvalidNamespace";
+  private static final String TEST_TABLE_IN_NAMESPACE = TEST_NAMESPACE + 
":testTableInNamespace";
+  private static final byte[] TEST_FAMILY = 
Bytes.toBytes("testRefreshHFilesCF1");
+
+  @Before
+  public void setUp() throws Exception {
+    conf = TEST_UTIL.getConfiguration();
+    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 60000);
+    conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 120000);
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
+    conf.setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0);
+    conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
+
+    // Start the test cluster
+    TEST_UTIL.startMiniCluster(1);
+    asyncConn = 
ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
+    admin = TEST_UTIL.getAdmin();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    Closeables.close(asyncConn, true);
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  private void createNamespace(String namespace) throws RuntimeException {
+    try {
+      final NamespaceDescriptor nsd = 
NamespaceDescriptor.create(namespace).build();
+      // Create the namespace if it doesn't exist
+      if (
+        Arrays.stream(admin.listNamespaceDescriptors())
+          .noneMatch(ns -> ns.getName().equals(namespace))
+      ) {
+        admin.createNamespace(nsd);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void deleteNamespace(String namespace) {
+    try {
+      // List table in namespace
+      TableName[] tables = admin.listTableNamesByNamespace(namespace);
+      for (TableName t : tables) {
+        TEST_UTIL.deleteTableIfAny(t);
+      }
+      // Now delete the namespace
+      admin.deleteNamespace(namespace);
+    } catch (Exception e) {
+      LOG.debug(
+        "Unable to delete namespace " + namespace + " post test execution. 
This isn't a failure");
+    }
+  }
+
+  @Test
+  public void testRefreshHFilesForTable() throws Exception {
+    try {
+      // Create table in default namespace
+      TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY);
+      TEST_UTIL.waitTableAvailable(TEST_TABLE);
+
+      // RefreshHFiles for table
+      Long procId = admin.refreshHFiles(TEST_TABLE);
+      assertTrue(procId >= 0);
+    } catch (Exception e) {
+      Assert.fail("RefreshHFilesForTable Should Not Throw Exception: " + e);
+      throw new RuntimeException(e);
+    } finally {
+      // Delete table name post test execution
+      TEST_UTIL.deleteTableIfAny(TEST_TABLE);
+    }
+  }
+
+  // Not creating table hence refresh should throw exception
+  @Test(expected = TableNotFoundException.class)
+  public void testRefreshHFilesForInvalidTable() throws Exception {

Review Comment:
   Similar to my comment above - `testRefreshHFilesForNonexistentTable()` seems 
more accurate



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRefreshHFilesFromClient.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static 
org.apache.hadoop.hbase.client.AsyncConnectionConfiguration.START_LOG_ERRORS_AFTER_COUNT_KEY;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestRefreshHFilesFromClient {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRefreshHFilesFromClient.class);
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestRefreshHFilesFromClient.class);
+  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+  private static AsyncConnection asyncConn;
+  private static Admin admin;
+  private static Configuration conf;
+  private static final TableName TEST_TABLE = 
TableName.valueOf("testRefreshHFilesTable");
+  private static final TableName TEST_INVALID_TABLE =

Review Comment:
   I think `TEST_NONEXISTENT_TABLE` might be a better name here.  Initially, I 
wasn't sure what made this table name invalid.



##########
hbase-shell/src/main/ruby/shell/commands/refresh_hfiles.rb:
##########
@@ -0,0 +1,36 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class RefreshHfiles < Command
+      def help
+        return <<-EOF
+Refresh HFiles for the table.
+For example:
+
+  hbase> refresh_hfiles 'TABLENAME'
+

Review Comment:
   I think it would be helpful if this included more info as well. @sharmaar12 
gave me a helpful description in Slack of why the command shouldn't include 
both `NAMESPACE` and `TABLE_NAME`, along with info on each valid case.  I think 
all of that info would be helpful in the `help` documentation.



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshHFilesProcedure.java:
##########
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_RETRIES_NUMBER;
+
+import java.io.IOException;
+import java.util.Arrays;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestRefreshHFilesProcedure {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRefreshHFilesProcedure.class);
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestRefreshHFilesProcedure.class);
+
+  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+  private Admin admin;
+  private ProcedureExecutor<MasterProcedureEnv> procExecutor;
+  private static Configuration conf;
+  private static final TableName TEST_TABLE = 
TableName.valueOf("testRefreshHFilesTable");
+  private static final String TEST_NAMESPACE = "testRefreshHFilesNamespace";
+  private static final String TEST_TABLE_IN_NAMESPACE = TEST_NAMESPACE + 
":testTableInNamespace";
+  private static final byte[] TEST_FAMILY = 
Bytes.toBytes("testRefreshHFilesCF1");
+
+  @Before
+  public void setup() throws Exception {
+    conf = TEST_UTIL.getConfiguration();
+    // Shorten the run time of failed unit tests by limiting retries and the 
session timeout
+    // threshold
+    conf.setInt(HBASE_CLIENT_RETRIES_NUMBER, 1);
+    conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
+
+    try {
+      // Start the test cluster
+      TEST_UTIL.startMiniCluster(1);
+      admin = TEST_UTIL.getAdmin();
+      procExecutor = 
TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+    } catch (Exception e) {
+      TEST_UTIL.shutdownMiniCluster();
+      throw new RuntimeException(e);
+    }
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (admin != null) {
+      admin.close();
+    }
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  private void createNamespace(String namespace) throws RuntimeException {

Review Comment:
   I see `createNamespace()` and `deleteNamespace()` are used both in this file 
and in `TestRefreshHFilesFromClient.java`. Both files use this sequence a lot 
as well:
   
   ```
   TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY);
   TEST_UTIL.waitTableAvailable(TEST_TABLE);
   ```
   
   If you wanted, you could put these in a base class and make the 
`createTable/waitTableAvailable` sequence a function to reduce repeated code.



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRefreshHFilesFromClient.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static 
org.apache.hadoop.hbase.client.AsyncConnectionConfiguration.START_LOG_ERRORS_AFTER_COUNT_KEY;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestRefreshHFilesFromClient {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRefreshHFilesFromClient.class);
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestRefreshHFilesFromClient.class);
+  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+  private static AsyncConnection asyncConn;
+  private static Admin admin;
+  private static Configuration conf;
+  private static final TableName TEST_TABLE = 
TableName.valueOf("testRefreshHFilesTable");
+  private static final TableName TEST_INVALID_TABLE =
+    TableName.valueOf("testRefreshHFilesInvalidTable");
+  private static final String TEST_NAMESPACE = "testRefreshHFilesNamespace";
+  private static final String TEST_INVALID_NAMESPACE = 
"testRefreshHFilesInvalidNamespace";
+  private static final String TEST_TABLE_IN_NAMESPACE = TEST_NAMESPACE + 
":testTableInNamespace";
+  private static final byte[] TEST_FAMILY = 
Bytes.toBytes("testRefreshHFilesCF1");
+
+  @Before
+  public void setUp() throws Exception {
+    conf = TEST_UTIL.getConfiguration();
+    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 60000);
+    conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 120000);
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
+    conf.setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0);
+    conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
+
+    // Start the test cluster
+    TEST_UTIL.startMiniCluster(1);
+    asyncConn = 
ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
+    admin = TEST_UTIL.getAdmin();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    Closeables.close(asyncConn, true);
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  private void createNamespace(String namespace) throws RuntimeException {
+    try {
+      final NamespaceDescriptor nsd = 
NamespaceDescriptor.create(namespace).build();
+      // Create the namespace if it doesn't exist
+      if (
+        Arrays.stream(admin.listNamespaceDescriptors())
+          .noneMatch(ns -> ns.getName().equals(namespace))
+      ) {
+        admin.createNamespace(nsd);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void deleteNamespace(String namespace) {
+    try {
+      // List table in namespace
+      TableName[] tables = admin.listTableNamesByNamespace(namespace);
+      for (TableName t : tables) {
+        TEST_UTIL.deleteTableIfAny(t);
+      }
+      // Now delete the namespace
+      admin.deleteNamespace(namespace);
+    } catch (Exception e) {
+      LOG.debug(
+        "Unable to delete namespace " + namespace + " post test execution. 
This isn't a failure");
+    }
+  }
+
+  @Test
+  public void testRefreshHFilesForTable() throws Exception {
+    try {
+      // Create table in default namespace
+      TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY);
+      TEST_UTIL.waitTableAvailable(TEST_TABLE);
+
+      // RefreshHFiles for table
+      Long procId = admin.refreshHFiles(TEST_TABLE);
+      assertTrue(procId >= 0);
+    } catch (Exception e) {
+      Assert.fail("RefreshHFilesForTable Should Not Throw Exception: " + e);
+      throw new RuntimeException(e);
+    } finally {
+      // Delete table name post test execution
+      TEST_UTIL.deleteTableIfAny(TEST_TABLE);
+    }
+  }
+
+  // Not creating table hence refresh should throw exception
+  @Test(expected = TableNotFoundException.class)
+  public void testRefreshHFilesForInvalidTable() throws Exception {
+    // RefreshHFiles for table
+    admin.refreshHFiles(TEST_INVALID_TABLE);
+  }
+
+  @Test
+  public void testRefreshHFilesForNamespace() throws Exception {
+    try {
+      createNamespace(TEST_NAMESPACE);
+
+      // Create table under test namespace
+      TEST_UTIL.createTable(TableName.valueOf(TEST_TABLE_IN_NAMESPACE), 
TEST_FAMILY);
+      TEST_UTIL.waitTableAvailable(TableName.valueOf(TEST_TABLE_IN_NAMESPACE));
+
+      // RefreshHFiles for namespace
+      Long procId = admin.refreshHFiles(TEST_NAMESPACE);
+      assertTrue(procId >= 0);
+
+    } catch (Exception e) {
+      Assert.fail("RefreshHFilesForAllNamespace Should Not Throw Exception: " 
+ e);
+      throw new RuntimeException(e);
+    } finally {
+      // Delete namespace post test execution
+      deleteNamespace(TEST_NAMESPACE);
+    }
+  }
+
+  @Test(expected = NamespaceNotFoundException.class)
+  public void testRefreshHFilesForInvalidNamespace() throws Exception {

Review Comment:
   Similar to my other comment - `testRefreshHFilesForNonexistentNamespace()` 
seems more accurate.



##########
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesTableProcedure.java:
##########
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshHFilesTableProcedureState;
+
[email protected]
+public class RefreshHFilesTableProcedure
+  extends AbstractStateMachineTableProcedure<RefreshHFilesTableProcedureState> 
{
+  private static final Logger LOG = 
LoggerFactory.getLogger(RefreshHFilesTableProcedure.class);
+
+  private TableName tableName;
+  private String namespaceName;
+
+  public RefreshHFilesTableProcedure() {
+    super();
+  }
+
+  public RefreshHFilesTableProcedure(MasterProcedureEnv env) {
+    super(env);
+  }
+
+  public RefreshHFilesTableProcedure(MasterProcedureEnv env, TableName 
tableName) {
+    super(env);
+    this.tableName = tableName;
+  }
+
+  public RefreshHFilesTableProcedure(MasterProcedureEnv env, String 
namespaceName) {
+    super(env);
+    this.namespaceName = namespaceName;
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+    return TableOperationType.REFRESH_HFILES;
+  }
+
+  @Override
+  protected void serializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    super.serializeStateData(serializer);
+    MasterProcedureProtos.RefreshHFilesTableProcedureStateData.Builder builder 
=
+      MasterProcedureProtos.RefreshHFilesTableProcedureStateData.newBuilder();
+    if (tableName != null && namespaceName == null) {
+      builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
+    } else if (tableName == null && namespaceName != null) {
+      builder.setNamespaceName(namespaceName);
+    }
+    serializer.serialize(builder.build());
+  }
+
+  @Override
+  protected void deserializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    super.deserializeStateData(serializer);
+    MasterProcedureProtos.RefreshHFilesTableProcedureStateData data =
+      
serializer.deserialize(MasterProcedureProtos.RefreshHFilesTableProcedureStateData.class);
+    if (data.hasTableName() && !data.hasNamespaceName()) {
+      this.tableName = ProtobufUtil.toTableName(data.getTableName());
+    } else if (!data.hasTableName() && data.hasNamespaceName()) {
+      this.namespaceName = data.getNamespaceName();
+    }
+  }
+
+  @Override
+  public TableName getTableName() {
+    if (tableName != null && namespaceName == null) {
+      return tableName;
+    }
+    return DUMMY_NAMESPACE_TABLE_NAME;
+  }
+
+  @Override
+  protected RefreshHFilesTableProcedureState getInitialState() {
+    return RefreshHFilesTableProcedureState.REFRESH_HFILES_PREPARE;
+  }
+
+  @Override
+  protected int getStateId(RefreshHFilesTableProcedureState state) {
+    return state.getNumber();
+  }
+
+  @Override
+  protected RefreshHFilesTableProcedureState getState(int stateId) {
+    return RefreshHFilesTableProcedureState.forNumber(stateId);
+  }
+
+  @Override
+  protected void rollbackState(MasterProcedureEnv env, 
RefreshHFilesTableProcedureState state)
+    throws IOException, InterruptedException {
+    // Refresh HFiles is idempotent operation hence rollback is not needed
+    LOG.trace("Rollback not implemented for RefreshHFilesTableProcedure state: 
{}", state);
+  }
+
+  @Override
+  protected Flow executeFromState(MasterProcedureEnv env, 
RefreshHFilesTableProcedureState state) {
+    LOG.info("Executing RefreshHFilesTableProcedureState state: {}", state);
+
+    try {
+      return switch (state) {
+        case REFRESH_HFILES_PREPARE -> prepare(env);
+        case REFRESH_HFILES_REFRESH_REGION -> refreshHFiles(env);
+        case REFRESH_HFILES_FINISH -> finish();
+        default -> throw new UnsupportedOperationException("Unhandled state: " 
+ state);
+      };
+    } catch (Exception ex) {
+      LOG.error("Error in RefreshHFilesTableProcedure state {}", state, ex);
+      setFailure("RefreshHFilesTableProcedure", ex);
+      return Flow.NO_MORE_STATE;
+    }
+  }
+
+  private Flow prepare(final MasterProcedureEnv env) {
+    
setNextState(RefreshHFilesTableProcedureState.REFRESH_HFILES_REFRESH_REGION);
+    return Flow.HAS_MORE_STATE;
+  }
+
+  private void refreshHFilesForTable(final MasterProcedureEnv env, TableName 
tableName) {
+    addChildProcedure(env.getAssignmentManager().getTableRegions(tableName, 
true).stream()
+      .map(r -> new 
RefreshHFilesRegionProcedure(r)).toArray(RefreshHFilesRegionProcedure[]::new));

Review Comment:
   nit
   ```suggestion
         
.map(RefreshHFilesRegionProcedure::new).toArray(RefreshHFilesRegionProcedure[]::new));
   ```



##########
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RefreshHFilesCallable.java:
##########
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+

Review Comment:
   Can you add some documentation here as well?



##########
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesRegionProcedure.java:
##########
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.Optional;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
+import org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
+import org.apache.hadoop.hbase.regionserver.RefreshHFilesCallable;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+
/**
 * A per-region procedure that asks the region's hosting RegionServer to refresh the store
 * files of that region. Instances are created as child procedures, one per region of a table
 * (see the parent procedure's addChildProcedure call), and dispatch the remote work through
 * the master's RemoteProcedureDispatcher.
 * <p>
 * The procedure suspends itself while the remote call is in flight and retries with
 * exponential backoff when the region is in transition or its location is unknown.
 */
@InterfaceAudience.Private
public class RefreshHFilesRegionProcedure extends Procedure<MasterProcedureEnv>
  implements TableProcedureInterface,
  RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv, ServerName> {
  private static final Logger LOG = LoggerFactory.getLogger(RefreshHFilesRegionProcedure.class);
  // The region whose HFiles should be refreshed; the only state that is persisted.
  private RegionInfo region;
  // Event used to suspend this procedure while the remote call is outstanding; transient,
  // re-created on every dispatch (null after a master restart).
  private ProcedureEvent<?> event;
  // True while a remote operation has been handed to the dispatcher and not yet answered.
  private boolean dispatched;
  // Set by the remote completion callback; true means the remote refresh succeeded.
  private boolean succ;
  // Lazily created; drives the exponential backoff used by setTimeoutForSuspend.
  private RetryCounter retryCounter;

  /** Required by the procedure framework for deserialization; do not call directly. */
  public RefreshHFilesRegionProcedure() {
  }

  /**
   * @param region the region whose store files should be refreshed
   */
  public RefreshHFilesRegionProcedure(RegionInfo region) {
    this.region = region;
  }
+
+  @Override
+  protected void deserializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    MasterProcedureProtos.RefreshHFilesRegionProcedureStateData data =
+      
serializer.deserialize(MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.class);
+    this.region = ProtobufUtil.toRegionInfo(data.getRegion());
+  }
+
+  @Override
+  protected void serializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.Builder 
builder =
+      MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.newBuilder();
+    builder.setRegion(ProtobufUtil.toRegionInfo(region));
+    serializer.serialize(builder.build());
+  }
+
  @Override
  protected boolean abort(MasterProcedureEnv env) {
    // Abort is not supported for this procedure; always report the abort request as rejected.
    return false;
  }
+
  @Override
  protected void rollback(MasterProcedureEnv env) throws IOException, InterruptedException {
    // There is no rollback for a refresh: the procedure has nothing to undo, so any attempt
    // to roll it back is a programming error.
    throw new UnsupportedOperationException();
  }
+
+  private void setTimeoutForSuspend(MasterProcedureEnv env, String reason) {
+    if (retryCounter == null) {
+      retryCounter = 
ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
+    }
+    long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
+    LOG.warn("{} can not run currently because {}, wait {} ms to retry", this, 
reason, backoff);
+    setTimeout(Math.toIntExact(backoff));
+    setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
+    skipPersistence();
+  }
+
+  @Override
+  protected Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env)
+    throws ProcedureYieldException, ProcedureSuspendedException, 
InterruptedException {
+    if (dispatched) {
+      if (succ) {
+        return null;
+      }
+      dispatched = false;
+    }
+
+    RegionStates regionStates = env.getAssignmentManager().getRegionStates();
+    RegionStateNode regionNode = regionStates.getRegionStateNode(region);
+
+    if (regionNode.getProcedure() != null) {
+      setTimeoutForSuspend(env, String.format("region %s has a TRSP attached 
%s",
+        region.getRegionNameAsString(), regionNode.getProcedure()));
+      throw new ProcedureSuspendedException();
+    }
+
+    if (!regionNode.isInState(RegionState.State.OPEN)) {
+      LOG.info("State of region {} is not OPEN. Skip {} ...", region, this);
+      setTimeoutForSuspend(env, String.format("region state of %s is %s",
+        region.getRegionNameAsString(), regionNode.getState()));
+      return null;
+    }
+
+    ServerName targetServer = regionNode.getRegionLocation();
+    if (targetServer == null) {
+      setTimeoutForSuspend(env,
+        String.format("target server of region %s is null", 
region.getRegionNameAsString()));
+      throw new ProcedureSuspendedException();
+    }
+
+    try {
+      env.getRemoteDispatcher().addOperationToNode(targetServer, this);
+      dispatched = true;
+      event = new ProcedureEvent<>(this);
+      event.suspendIfNotReady(this);
+      throw new ProcedureSuspendedException();
+    } catch (FailedRemoteDispatchException e) {
+      setTimeoutForSuspend(env, "Failed send request to " + targetServer);
+      throw new ProcedureSuspendedException();
+    }
+  }
+
  @Override
  public TableOperationType getTableOperationType() {
    // Identifies this procedure as a REFRESH_HFILES table operation to the framework.
    return TableOperationType.REFRESH_HFILES;
  }
+
  @Override
  public TableName getTableName() {
    // The table is derived from the target region's descriptor.
    return region.getTable();
  }
+
  /** Callback from the dispatcher when the remote refresh reported a failure. */
  @Override
  public void remoteOperationFailed(MasterProcedureEnv env, RemoteProcedureException error) {
    complete(env, error);
  }
+
  /** Callback from the dispatcher when the remote refresh finished successfully. */
  @Override
  public void remoteOperationCompleted(MasterProcedureEnv env) {
    complete(env, null);
  }
+
  /** Callback from the dispatcher when the RPC itself could not be delivered to the server. */
  @Override
  public void remoteCallFailed(MasterProcedureEnv env, ServerName serverName, IOException e) {
    complete(env, e);
  }
+
+  private void complete(MasterProcedureEnv env, Throwable error) {
+    if (isFinished()) {
+      LOG.info("This procedure {} is already finished, skip the rest 
processes", this.getProcId());
+      return;
+    }
+    if (event == null) {
+      LOG.warn("procedure event for {} is null, maybe the procedure is created 
when recovery",
+        getProcId());

Review Comment:
   What do you mean by "maybe the procedure is created when recovery"? I am 
not familiar with any recovery procedures for `refresh_hfiles`.



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRefreshHFilesFromClient.java:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static 
org.apache.hadoop.hbase.client.AsyncConnectionConfiguration.START_LOG_ERRORS_AFTER_COUNT_KEY;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestRefreshHFilesFromClient {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRefreshHFilesFromClient.class);
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestRefreshHFilesFromClient.class);
+  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+  private static AsyncConnection asyncConn;
+  private static Admin admin;
+  private static Configuration conf;
+  private static final TableName TEST_TABLE = 
TableName.valueOf("testRefreshHFilesTable");
+  private static final TableName TEST_INVALID_TABLE =
+    TableName.valueOf("testRefreshHFilesInvalidTable");
+  private static final String TEST_NAMESPACE = "testRefreshHFilesNamespace";
+  private static final String TEST_INVALID_NAMESPACE = 
"testRefreshHFilesInvalidNamespace";

Review Comment:
   Similar to my previous comments. It seems the namespace is 
nonexistent rather than invalid.
   ```suggestion
     private static final String TEST_NONEXISTENT_NAMESPACE = 
"testRefreshHFilesInvalidNamespace";
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to