From f6defff8673e0e94e166f971ce4ec11bd9e07c03 Mon Sep 17 00:00:00 2001 From: Peng Lu Date: Sat, 28 Feb 2026 20:28:23 +0800 Subject: [PATCH 1/2] HBASE-29862 Test case TestClearRegionBlockCache#testClearBlockCache failed --- .../hbase/io/hfile/bucket/BucketCache.java | 34 +++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 2854ff598ddd..4b9e2b10c7e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -1420,8 +1420,7 @@ protected String getAlgorithm() { */ @Override public int evictBlocksByHfileName(String hfileName) { - Set<BlockCacheKey> keySet = blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), - true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true); + Set<BlockCacheKey> keySet = getAllCacheKeysForFile(hfileName, 0L, Long.MAX_VALUE); int numEvicted = 0; for (BlockCacheKey key : keySet) { @@ -1433,6 +1432,26 @@ public int evictBlocksByHfileName(String hfileName) { return numEvicted; } + private Set<BlockCacheKey> getAllCacheKeysForFile(String hfileName, long init, long end) { + Set<BlockCacheKey> cacheKeys = new HashSet<>(); + // At this moment, some bucket entries may be in the WriterThread queue, and not yet put into + // the backingMap. So, when executing this method, we should check both the RAMCache and + // backingMap to ensure all CacheKeys are obtained. + // For more details, please refer to HBASE-29862. 
+ Set<BlockCacheKey> ramCacheKeySet = ramCache.getRamBlockCacheKeysForHFile(hfileName); + for (BlockCacheKey key : ramCacheKeySet) { + if (key.getOffset() >= init && key.getOffset() <= end) { + cacheKeys.add(key); + } + } + + // These keys are just for comparison and are short lived, so we need only file name and offset + cacheKeys.addAll(blocksByHFile.subSet(new BlockCacheKey(hfileName, init), true, + new BlockCacheKey(hfileName, end), true)); + return cacheKeys; + } + + /** * Used to group bucket entries into priority buckets. There will be a BucketEntryGroup for each * priority (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate @@ -1770,5 +1789,16 @@ public void clear() { re.getData().release(); } } + + public Set<BlockCacheKey> getRamBlockCacheKeysForHFile(String fileName) { + Set<BlockCacheKey> ramCacheKeySet = new HashSet<>(); + for (BlockCacheKey blockCacheKey : delegate.keySet()) { + if (blockCacheKey.getHfileName().equals(fileName)) { + ramCacheKeySet.add(blockCacheKey); + } + } + + return ramCacheKeySet; + } } } From 327903e0d71716d5a5c2e5a0dd9f932c0fa737d9 Mon Sep 17 00:00:00 2001 From: Peng Lu Date: Sat, 28 Feb 2026 23:28:10 +0800 Subject: [PATCH 2/2] mvn spotless:apply --- .../org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 4b9e2b10c7e7..0ca8665f3540 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -1451,7 +1451,6 @@ private Set<BlockCacheKey> getAllCacheKeysForFile(String hfileName, long init, l return cacheKeys; } - /** * Used to group bucket entries into priority buckets. There will be a BucketEntryGroup for each * priority (single, multi, memory). Once bucketed, the eviction algorithm takes the appropriate