wchevreuil commented on code in PR #7291:
URL: https://github.com/apache/hbase/pull/7291#discussion_r2343902527


##########
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowCacheService.java:
##########
@@ -0,0 +1,361 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.apache.hadoop.hbase.HConstants.ROW_CACHE_ACTIVATE_MIN_HFILES_DEFAULT;
+import static org.apache.hadoop.hbase.HConstants.ROW_CACHE_ACTIVATE_MIN_HFILES_KEY;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.CheckAndMutate;
+import org.apache.hadoop.hbase.client.CheckAndMutateResult;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Consistency;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.hfile.RowCacheKey;
+import org.apache.hadoop.hbase.ipc.RpcCallContext;
+import org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement;
+import org.apache.hadoop.hbase.quotas.OperationQuota;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
+
+/**
+ * This service is responsible for populating the row cache and for retrieving rows from it.
+ */
+class RowCacheService {
+  /**
+   * A barrier that prevents the row cache from being populated during table operations, such as
+   * bulk loads. It is implemented as a counter to address issues that arise when the same table is
+   * updated concurrently.
+   */
+  private final Map<TableName, AtomicInteger> tableLevelBarrierMap = new ConcurrentHashMap<>();
+  /**
+   * A barrier that prevents the row cache from being populated during row mutations. It is
+   * implemented as a counter to address issues that arise when the same row is mutated
+   * concurrently.
+   */
+  private final Map<RowCacheKey, AtomicInteger> rowLevelBarrierMap = new ConcurrentHashMap<>();
+  private int activateMinHFiles;
+
+  @FunctionalInterface
+  interface RowOperation<R> {
+    R execute() throws IOException;
+  }
+
+  RowCacheService(Configuration conf) {
+    updateConf(conf);
+  }
+
+  synchronized void updateConf(Configuration conf) {
+    this.activateMinHFiles =
+      conf.getInt(ROW_CACHE_ACTIVATE_MIN_HFILES_KEY, ROW_CACHE_ACTIVATE_MIN_HFILES_DEFAULT);
+  }
+
+  RegionScannerImpl getScanner(HRegion region, Get get, Scan scan, List<Cell> results)
+    throws IOException {
+    if (!canCacheRow(get, region)) {
+      return getScannerInternal(region, scan, results);
+    }
+
+    RowCacheKey key = new RowCacheKey(region, get.getRow());
+
+    // Try to get the row from the row cache
+    if (tryGetFromCache(region, key, get, results)) {
+      // Cache hit; no scanner needs to be created
+      return null;
+    }
+
+    RegionScannerImpl scanner = getScannerInternal(region, scan, results);
+
+    // The row cache is ineffective when the number of store files is small. If the number
+    // of store files falls below the minimum threshold, rows will not be cached.
+    if (hasSufficientHFiles(region)) {
+      populateCache(region, results, key);
+    }
+
+    return scanner;
+  }
+
+  private RegionScannerImpl getScannerInternal(HRegion region, Scan scan, List<Cell> results)
+    throws IOException {
+    RegionScannerImpl scanner = region.getScanner(scan);
+    scanner.next(results);

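The counter-based barrier described in the javadoc above can be pictured with the minimal sketch below. The class and method names are hypothetical, for illustration only, and are not the PR's actual code.

```java
// Hypothetical sketch of a counter-based barrier, as described in the javadoc.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

class BarrierSketch<K> {
  private final Map<K, AtomicInteger> barriers = new ConcurrentHashMap<>();

  /** Called before a mutation starts: raises the barrier for the key. */
  void raise(K key) {
    barriers.computeIfAbsent(key, k -> new AtomicInteger()).incrementAndGet();
  }

  /** Called after the mutation finishes: lowers the barrier, removing the entry at zero. */
  void lower(K key) {
    barriers.computeIfPresent(key, (k, counter) -> counter.decrementAndGet() == 0 ? null : counter);
  }

  /** The cache may only be populated when no in-flight mutation holds the barrier. */
  boolean canPopulate(K key) {
    return !barriers.containsKey(key);
  }
}
```

Using a counter instead of a boolean flag lets concurrent mutations on the same key each raise and lower the barrier independently, so cache population only resumes once all in-flight mutations have finished.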
Review Comment:
   I wonder if the row cache should be something separate from the block cache, as "semantically" it's not actually a block cache.
   And then, we should decide what to do when reading via a Get. If we miss the row in the row cache, we may still find it in the block cache, and we'll then cache it in the row cache (as it is right now; I think such redundancy is valid, given the performance gain for subsequent gets). However, what if we miss the row in the block cache too? Currently, the default for the hbase.block.data.cacheonread property is true, which would cache the whole block in the block cache. The question: do we want to cache blocks in the case of a Get read now?
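   For illustration, a Get can already opt out of block caching per request on the client side. Below is a minimal sketch assuming the standard HBase client API; the class and method names are illustrative, not from this PR. Whether the server-side row cache path should force this behaviour for Gets is exactly the open question.

```java
// Minimal sketch, assuming the standard HBase client API.
// Class and method names are illustrative only.
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

final class PointReadSketch {
  /** A point read that asks the server not to cache the data blocks it touches. */
  static Result pointRead(Connection conn, TableName table, byte[] row) throws IOException {
    try (Table t = conn.getTable(table)) {
      Get get = new Get(row);
      // Overrides the cache-on-read default for this request only:
      // blocks read to serve this Get are not added to the block cache.
      get.setCacheBlocks(false);
      return t.get(get);
    }
  }
}
```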



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
