From fcfc6748b06ab64d976bda45ddf621b87ce59e82 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 17 Apr 2026 15:34:41 +0800 Subject: [PATCH 1/4] HBASE-30086 Rewrite TestFromClientSide related tests --- .../FromClientSideScanExcpetionTestBase.java | 231 ++ ...ientSide.java => FromClientSideTest1.java} | 582 ++-- ...TestBase.java => FromClientSideTest3.java} | 304 +- .../hbase/client/FromClientSideTest4.java | 1173 +++++++ .../hbase/client/FromClientSideTest5.java | 2745 ++++++++++++++++ ...eBase.java => FromClientSideTestBase.java} | 117 +- ...ntSideTestFilterAcrossMultipleRegions.java | 93 + .../client/TestClientDataStructureMisc.java | 118 + .../hbase/client/TestConnectionReconnect.java | 113 + .../hbase/client/TestFromClientSide1.java | 40 + .../TestFromClientSide1WithCoprocessor.java | 42 + .../hbase/client/TestFromClientSide3.java | 11 +- .../TestFromClientSide3WithCoprocessor.java | 42 + .../hbase/client/TestFromClientSide4.java | 1335 +------- .../TestFromClientSide4WithCoprocessor.java | 47 + .../hbase/client/TestFromClientSide5.java | 2886 +---------------- .../TestFromClientSide5WithCoprocessor.java | 46 + ...ClientSideFilterAcrossMultipleRegions.java | 42 + ...rAcrossMultipleRegionsWithCoprocessor.java | 43 + .../client/TestFromClientSideNoCodec.java | 47 +- .../TestFromClientSideScanExcpetion.java | 224 +- ...lientSideScanExcpetionWithCoprocessor.java | 19 +- .../TestFromClientSideWithCoprocessor.java | 63 - .../TestFromClientSideWithCoprocessor4.java | 61 - .../TestFromClientSideWithCoprocessor5.java | 60 - .../client/TestIncrementsFromClientSide.java | 2 +- .../client/TestScannersFromClientSide.java | 2 +- .../hbase/client/TestTableScanMetrics.java | 243 +- .../TestPostIncrementAndAppendBeforeWAL.java | 4 +- .../regionserver/NoOpScanPolicyObserver.java | 3 +- .../util/TestFromClientSide3WoUnsafe.java | 15 +- 31 files changed, 5397 insertions(+), 5356 deletions(-) create mode 100644 
hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideScanExcpetionTestBase.java rename hbase-server/src/test/java/org/apache/hadoop/hbase/client/{TestFromClientSide.java => FromClientSideTest1.java} (79%) rename hbase-server/src/test/java/org/apache/hadoop/hbase/client/{FromClientSide3TestBase.java => FromClientSideTest3.java} (85%) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest4.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest5.java rename hbase-server/src/test/java/org/apache/hadoop/hbase/client/{FromClientSideBase.java => FromClientSideTestBase.java} (93%) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestFilterAcrossMultipleRegions.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientDataStructureMisc.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionReconnect.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide1.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide1WithCoprocessor.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3WithCoprocessor.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4WithCoprocessor.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5WithCoprocessor.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideFilterAcrossMultipleRegions.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideFilterAcrossMultipleRegionsWithCoprocessor.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java delete mode 
100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor4.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor5.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideScanExcpetionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideScanExcpetionTestBase.java new file mode 100644 index 000000000000..36ba1da3ed88 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideScanExcpetionTestBase.java @@ -0,0 +1,231 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.NavigableSet; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ExtendedCell; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNameTestExtension; +import org.apache.hadoop.hbase.exceptions.ScannerResetException; +import org.apache.hadoop.hbase.regionserver.DelegatingKeyValueScanner; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.KeyValueScanner; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.regionserver.ReversedStoreScanner; +import org.apache.hadoop.hbase.regionserver.ScanInfo; +import org.apache.hadoop.hbase.regionserver.StoreScanner; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WAL; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; + +public class FromClientSideScanExcpetionTestBase { + + protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + protected static byte[] FAMILY = Bytes.toBytes("testFamily"); + + protected static int SLAVES = 3; + + @RegisterExtension + 
private TableNameTestExtension name = new TableNameTestExtension(); + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + private static AtomicBoolean ON = new AtomicBoolean(false); + private static AtomicLong REQ_COUNT = new AtomicLong(0); + private static AtomicBoolean IS_DO_NOT_RETRY = new AtomicBoolean(false); // whether to throw + // DNRIOE + private static AtomicBoolean THROW_ONCE = new AtomicBoolean(true); // whether to only throw once + + private static void reset() { + ON.set(false); + REQ_COUNT.set(0); + IS_DO_NOT_RETRY.set(false); + THROW_ONCE.set(true); + } + + private static void inject() { + ON.set(true); + } + + protected static void startCluster() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3); + conf.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 6000000); + conf.setClass(HConstants.REGION_IMPL, MyHRegion.class, HRegion.class); + conf.setBoolean("hbase.client.log.scanner.activity", true); + // We need more than one region server in this test + TEST_UTIL.startMiniCluster(SLAVES); + } + + public static final class MyHRegion extends HRegion { + + @SuppressWarnings("deprecation") + public MyHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration confParam, + RegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices) { + super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices); + } + + @Override + protected HStore instantiateHStore(ColumnFamilyDescriptor family, boolean warmup) + throws IOException { + return new MyHStore(this, family, conf, warmup); + } + } + + public static final class MyHStore extends HStore { + + public MyHStore(HRegion region, ColumnFamilyDescriptor family, Configuration confParam, + boolean warmup) throws IOException { + super(region, family, confParam, warmup); + } + + @Override + protected KeyValueScanner createScanner(Scan scan, ScanInfo 
scanInfo, + NavigableSet targetCols, long readPt) throws IOException { + return scan.isReversed() + ? new ReversedStoreScanner(this, scanInfo, scan, targetCols, readPt) + : new MyStoreScanner(this, scanInfo, scan, targetCols, readPt); + } + } + + public static final class MyStoreScanner extends StoreScanner { + public MyStoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet columns, + long readPt) throws IOException { + super(store, scanInfo, scan, columns, readPt); + } + + @Override + protected List selectScannersFrom(HStore store, + List allScanners) { + List scanners = super.selectScannersFrom(store, allScanners); + List newScanners = new ArrayList<>(scanners.size()); + for (KeyValueScanner scanner : scanners) { + newScanners.add(new DelegatingKeyValueScanner(scanner) { + @Override + public boolean reseek(ExtendedCell key) throws IOException { + if (ON.get()) { + REQ_COUNT.incrementAndGet(); + if (!THROW_ONCE.get() || REQ_COUNT.get() == 1) { + if (IS_DO_NOT_RETRY.get()) { + throw new DoNotRetryIOException("Injected exception"); + } else { + throw new IOException("Injected exception"); + } + } + } + return super.reseek(key); + } + }); + } + return newScanners; + } + } + + /** + * Tests the case where a Scan can throw an IOException in the middle of the seek / reseek leaving + * the server side RegionScanner to be in dirty state. The client has to ensure that the + * ClientScanner does not get an exception and also sees all the data. 
+ */ + @Test + public void testClientScannerIsResetWhenScanThrowsIOException() + throws IOException, InterruptedException { + reset(); + THROW_ONCE.set(true); // throw exceptions only once + TableName tableName = name.getTableName(); + try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { + int rowCount = TEST_UTIL.loadTable(t, FAMILY, false); + TEST_UTIL.getAdmin().flush(tableName); + inject(); + int actualRowCount = HBaseTestingUtil.countRows(t, new Scan().addColumn(FAMILY, FAMILY)); + assertEquals(rowCount, actualRowCount); + } + assertTrue(REQ_COUNT.get() > 0); + } + + /** + * Tests the case where a coprocessor throws a DoNotRetryIOException in the scan. The expectation + * is that the exception will bubble up to the client scanner instead of being retried. + */ + @Test + public void testScannerThrowsExceptionWhenCoprocessorThrowsDNRIOE() + throws IOException, InterruptedException { + reset(); + IS_DO_NOT_RETRY.set(true); + TableName tableName = name.getTableName(); + try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { + TEST_UTIL.loadTable(t, FAMILY, false); + TEST_UTIL.getAdmin().flush(tableName); + inject(); + HBaseTestingUtil.countRows(t, new Scan().addColumn(FAMILY, FAMILY)); + fail("Should have thrown an exception"); + } catch (DoNotRetryIOException expected) { + // expected + } + assertTrue(REQ_COUNT.get() > 0); + } + + /** + * Tests the case where a coprocessor throws a regular IOException in the scan. The expectation is + * that the we will keep on retrying, but fail after the retries are exhausted instead of retrying + * indefinitely. 
+ */ + @Test + public void testScannerFailsAfterRetriesWhenCoprocessorThrowsIOE() + throws IOException, InterruptedException { + TableName tableName = name.getTableName(); + reset(); + THROW_ONCE.set(false); // throw exceptions in every retry + try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { + TEST_UTIL.loadTable(t, FAMILY, false); + TEST_UTIL.getAdmin().flush(tableName); + inject(); + HBaseTestingUtil.countRows(t, new Scan().addColumn(FAMILY, FAMILY)); + fail("Should have thrown an exception"); + } catch (ScannerResetException expected) { + // expected + } catch (RetriesExhaustedException e) { + // expected + assertThat(e.getCause(), instanceOf(ScannerResetException.class)); + } + assertTrue(REQ_COUNT.get() >= 3); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest1.java similarity index 79% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest1.java index 2e4a13fc8ce4..70d96d54d36b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest1.java @@ -17,53 +17,36 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.HBaseTestingUtil.countRows; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; 
+import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNameTestRule; -import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.KeyOnlyFilter; import org.apache.hadoop.hbase.filter.LongComparator; import org.apache.hadoop.hbase.filter.QualifierFilter; import org.apache.hadoop.hbase.filter.RegexStringComparator; import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.AfterClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.TestTemplate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,43 +59,21 @@ * @see TestFromClientSide5 */ // NOTE: Increment tests were moved to their 
own class, TestIncrementsFromClientSide. -@Category({ LargeTests.class, ClientTests.class }) -@SuppressWarnings("deprecation") -@RunWith(Parameterized.class) -public class TestFromClientSide extends FromClientSideBase { - private static final Logger LOG = LoggerFactory.getLogger(TestFromClientSide.class); - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSide.class); - @Rule - public TableNameTestRule name = new TableNameTestRule(); - - // To keep the child classes happy. - TestFromClientSide() { - } - - public TestFromClientSide(Class registry, int numHedgedReqs) throws Exception { - initialize(registry, numHedgedReqs, MultiRowMutationEndpoint.class); - } +public class FromClientSideTest1 extends FromClientSideTestBase { - @Parameterized.Parameters - public static Collection parameters() { - return Arrays.asList(new Object[][] { { MasterRegistry.class, 1 }, { MasterRegistry.class, 2 }, - { ZKConnectionRegistry.class, 1 } }); - } + private static final Logger LOG = LoggerFactory.getLogger(FromClientSideTest1.class); - @AfterClass - public static void tearDownAfterClass() throws Exception { - afterClass(); + protected FromClientSideTest1(Class registryImpl, + int numHedgedReqs) { + super(registryImpl, numHedgedReqs); } /** * Test append result when there are duplicate rpc request. 
*/ - @Test + @TestTemplate public void testDuplicateAppend() throws Exception { - TableDescriptorBuilder builder = TEST_UTIL.createModifyableTableDescriptor(name.getTableName(), + TableDescriptorBuilder builder = TEST_UTIL.createModifyableTableDescriptor(tableName, ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); Map kvs = new HashMap<>(); @@ -121,13 +82,13 @@ public void testDuplicateAppend() throws Exception { .newBuilder(SleepAtFirstRpcCall.class.getName()).setPriority(1).setProperties(kvs).build()); TEST_UTIL.createTable(builder.build(), new byte[][] { ROW }).close(); - Configuration c = new Configuration(TEST_UTIL.getConfiguration()); + Configuration c = getClientConf(); c.setInt(HConstants.HBASE_CLIENT_PAUSE, 50); // Client will retry because rpc timeout is small than the sleep time of first rpc call c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); try (Connection connection = ConnectionFactory.createConnection(c); Table table = - connection.getTableBuilder(name.getTableName(), null).setOperationTimeout(3 * 1000).build()) { + connection.getTableBuilder(tableName, null).setOperationTimeout(3 * 1000).build()) { Append append = new Append(ROW); append.addColumn(HBaseTestingUtil.fam1, QUALIFIER, VALUE); Result result = table.append(append); @@ -148,9 +109,9 @@ public void testDuplicateAppend() throws Exception { /** * Test batch append result when there are duplicate rpc request. 
*/ - @Test + @TestTemplate public void testDuplicateBatchAppend() throws Exception { - TableDescriptorBuilder builder = TEST_UTIL.createModifyableTableDescriptor(name.getTableName(), + TableDescriptorBuilder builder = TEST_UTIL.createModifyableTableDescriptor(tableName, ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, 3, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED); Map kvs = new HashMap<>(); @@ -159,13 +120,13 @@ public void testDuplicateBatchAppend() throws Exception { .newBuilder(SleepAtFirstRpcCall.class.getName()).setPriority(1).setProperties(kvs).build()); TEST_UTIL.createTable(builder.build(), new byte[][] { ROW }).close(); - Configuration c = new Configuration(TEST_UTIL.getConfiguration()); + Configuration c = getClientConf(); c.setInt(HConstants.HBASE_CLIENT_PAUSE, 50); // Client will retry because rpc timeout is small than the sleep time of first rpc call c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); try (Connection connection = ConnectionFactory.createConnection(c); Table table = - connection.getTableBuilder(name.getTableName(), null).setOperationTimeout(3 * 1000).build()) { + connection.getTableBuilder(tableName, null).setOperationTimeout(3 * 1000).build()) { Append append = new Append(ROW); append.addColumn(HBaseTestingUtil.fam1, QUALIFIER, VALUE); @@ -189,9 +150,8 @@ public void testDuplicateBatchAppend() throws Exception { /** * Basic client side validation of HBASE-4536 */ - @Test + @TestTemplate public void testKeepDeletedCells() throws Exception { - final TableName tableName = name.getTableName(); final byte[] FAMILY = Bytes.toBytes("family"); final byte[] C0 = Bytes.toBytes("c0"); @@ -204,7 +164,7 @@ public void testKeepDeletedCells() throws Exception { .setKeepDeletedCells(KeepDeletedCells.TRUE).setMaxVersions(3).build()) .build(); TEST_UTIL.getAdmin().createTable(tableDescriptor); - try (Table h = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection conn = getConnection(); Table h = 
conn.getTable(tableName)) { long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.addColumn(FAMILY, C0, T1); @@ -255,15 +215,16 @@ public void testKeepDeletedCells() throws Exception { /** * Basic client side validation of HBASE-10118 */ - @Test + @TestTemplate public void testPurgeFutureDeletes() throws Exception { - final TableName tableName = name.getTableName(); + final byte[] ROW = Bytes.toBytes("row"); final byte[] FAMILY = Bytes.toBytes("family"); final byte[] COLUMN = Bytes.toBytes("column"); final byte[] VALUE = Bytes.toBytes("value"); + TEST_UTIL.createTable(tableName, FAMILY); - try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { // future timestamp long ts = EnvironmentEdgeManager.currentTime() * 2; Put put = new Put(ROW, ts); @@ -304,12 +265,13 @@ public void testPurgeFutureDeletes() throws Exception { * Verifies that getConfiguration returns the same Configuration object used to create the HTable * instance. */ - @Test + @TestTemplate public void testGetConfiguration() throws Exception { - final TableName tableName = name.getTableName(); byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") }; - Configuration conf = TEST_UTIL.getConfiguration(); - try (Table table = TEST_UTIL.createTable(tableName, FAMILIES)) { + TEST_UTIL.createTable(tableName, FAMILIES); + Configuration conf = getClientConf(); + try (Connection conn = ConnectionFactory.createConnection(conf); + Table table = conn.getTable(tableName)) { assertSame(conf, table.getConfiguration()); } } @@ -317,12 +279,12 @@ public void testGetConfiguration() throws Exception { /** * Test from client side of an involved filter against a multi family that involves deletes. 
*/ - @Test + @TestTemplate public void testWeirdCacheBehaviour() throws Exception { - final TableName tableName = name.getTableName(); byte[][] FAMILIES = new byte[][] { Bytes.toBytes("trans-blob"), Bytes.toBytes("trans-type"), Bytes.toBytes("trans-date"), Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") }; - try (Table ht = TEST_UTIL.createTable(tableName, FAMILIES)) { + TEST_UTIL.createTable(tableName, FAMILIES); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { String value = "this is the value"; String value2 = "this is some other value"; String keyPrefix1 = HBaseTestingUtil.getRandomUUID().toString(); @@ -334,137 +296,70 @@ public void testWeirdCacheBehaviour() throws Exception { putRows(ht, 3, value2, keyPrefix1); putRows(ht, 3, value2, keyPrefix2); putRows(ht, 3, value2, keyPrefix3); - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { - System.out.println("Checking values for key: " + keyPrefix1); - assertEquals("Got back incorrect number of rows from scan", 3, - getNumberOfRows(keyPrefix1, value2, table)); - System.out.println("Checking values for key: " + keyPrefix2); - assertEquals("Got back incorrect number of rows from scan", 3, - getNumberOfRows(keyPrefix2, value2, table)); - System.out.println("Checking values for key: " + keyPrefix3); - assertEquals("Got back incorrect number of rows from scan", 3, - getNumberOfRows(keyPrefix3, value2, table)); - deleteColumns(ht, value2, keyPrefix1); - deleteColumns(ht, value2, keyPrefix2); - deleteColumns(ht, value2, keyPrefix3); - System.out.println("Starting important checks....."); - assertEquals("Got back incorrect number of rows from scan: " + keyPrefix1, 0, - getNumberOfRows(keyPrefix1, value2, table)); - assertEquals("Got back incorrect number of rows from scan: " + keyPrefix2, 0, - getNumberOfRows(keyPrefix2, value2, table)); - assertEquals("Got back incorrect number of rows from scan: " + keyPrefix3, 0, - getNumberOfRows(keyPrefix3, value2, table)); 
- } - } - } - - /** - * Test filters when multiple regions. It does counts. Needs eye-balling of logs to ensure that - * we're not scanning more regions that we're supposed to. Related to the TestFilterAcrossRegions - * over in the o.a.h.h.filter package. - */ - @Test - public void testFilterAcrossMultipleRegions() throws IOException { - final TableName tableName = name.getTableName(); - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - int rowCount = TEST_UTIL.loadTable(t, FAMILY, false); - assertRowCount(t, rowCount); - // Split the table. Should split on a reasonable key; 'lqj' - List regions = splitTable(t); - assertRowCount(t, rowCount); - // Get end key of first region. - byte[] endKey = regions.get(0).getRegion().getEndKey(); - // Count rows with a filter that stops us before passed 'endKey'. - // Should be count of rows in first region. - int endKeyCount = countRows(t, createScanWithRowFilter(endKey)); - assertTrue(endKeyCount < rowCount); - - // How do I know I did not got to second region? Thats tough. Can't really - // do that in client-side region test. I verified by tracing in debugger. - // I changed the messages that come out when set to DEBUG so should see - // when scanner is done. Says "Finished with scanning..." with region name. - // Check that its finished in right region. - - // New test. Make it so scan goes into next region by one and then two. - // Make sure count comes out right. - byte[] key = new byte[] { endKey[0], endKey[1], (byte) (endKey[2] + 1) }; - int plusOneCount = countRows(t, createScanWithRowFilter(key)); - assertEquals(endKeyCount + 1, plusOneCount); - key = new byte[] { endKey[0], endKey[1], (byte) (endKey[2] + 2) }; - int plusTwoCount = countRows(t, createScanWithRowFilter(key)); - assertEquals(endKeyCount + 2, plusTwoCount); - - // New test. Make it so I scan one less than endkey. 
- key = new byte[] { endKey[0], endKey[1], (byte) (endKey[2] - 1) }; - int minusOneCount = countRows(t, createScanWithRowFilter(key)); - assertEquals(endKeyCount - 1, minusOneCount); - // For above test... study logs. Make sure we do "Finished with scanning.." - // in first region and that we do not fall into the next region. - - key = new byte[] { 'a', 'a', 'a' }; - int countBBB = countRows(t, createScanWithRowFilter(key, null, CompareOperator.EQUAL)); - assertEquals(1, countBBB); - - int countGreater = - countRows(t, createScanWithRowFilter(endKey, null, CompareOperator.GREATER_OR_EQUAL)); - // Because started at start of table. - assertEquals(0, countGreater); - countGreater = - countRows(t, createScanWithRowFilter(endKey, endKey, CompareOperator.GREATER_OR_EQUAL)); - assertEquals(rowCount - endKeyCount, countGreater); + LOG.info("Checking values for key: " + keyPrefix1); + assertEquals(3, getNumberOfRows(keyPrefix1, value2, ht), + "Got back incorrect number of rows from scan"); + LOG.info("Checking values for key: " + keyPrefix2); + assertEquals(3, getNumberOfRows(keyPrefix2, value2, ht), + "Got back incorrect number of rows from scan"); + LOG.info("Checking values for key: " + keyPrefix3); + assertEquals(3, getNumberOfRows(keyPrefix3, value2, ht), + "Got back incorrect number of rows from scan"); + deleteColumns(ht, value2, keyPrefix1); + deleteColumns(ht, value2, keyPrefix2); + deleteColumns(ht, value2, keyPrefix3); + LOG.info("Starting important checks....."); + assertEquals(0, getNumberOfRows(keyPrefix1, value2, ht), + "Got back incorrect number of rows from scan: " + keyPrefix1); + assertEquals(0, getNumberOfRows(keyPrefix2, value2, ht), + "Got back incorrect number of rows from scan: " + keyPrefix2); + assertEquals(0, getNumberOfRows(keyPrefix3, value2, ht), + "Got back incorrect number of rows from scan: " + keyPrefix3); } } - @Test + @TestTemplate public void testSuperSimple() throws Exception { - final TableName tableName = name.getTableName(); - try 
(Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); ht.put(put); Scan scan = new Scan(); scan.addColumn(FAMILY, tableName.toBytes()); - ResultScanner scanner = ht.getScanner(scan); - Result result = scanner.next(); - assertNull("Expected null result", result); - scanner.close(); + try (ResultScanner scanner = ht.getScanner(scan)) { + Result result = scanner.next(); + assertNull(result, "Expected null result"); + } } } - @Test + @TestTemplate public void testMaxKeyValueSize() throws Exception { - final TableName tableName = name.getTableName(); - Configuration conf = TEST_UTIL.getConfiguration(); - String oldMaxSize = conf.get(ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - byte[] value = new byte[4 * 1024 * 1024]; - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, value); - ht.put(put); + TEST_UTIL.createTable(tableName, FAMILY); + byte[] value = new byte[4 * 1024 * 1024]; + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, value); + Configuration conf = getClientConf(); + // no problem + try (Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(tableName)) { + t.put(put); + } - try { - TEST_UTIL.getConfiguration().setInt(ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, - 2 * 1024 * 1024); - // Create new table so we pick up the change in Configuration. 
- try (Connection connection = - ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { - try (Table t = connection.getTable(TableName.valueOf(FAMILY))) { - put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, value); - t.put(put); - } - } - fail("Inserting a too large KeyValue worked, should throw exception"); - } catch (Exception ignored) { - } + // set max kv size limit + conf.setInt(ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, 2 * 1024 * 1024); + try (Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(tableName)) { + assertThrows(IllegalArgumentException.class, () -> t.put(put), + "Inserting a too large KeyValue worked, should throw exception"); } - conf.set(ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, oldMaxSize); } - @Test + @TestTemplate public void testFilters() throws Exception { - final TableName tableName = name.getTableName(); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { byte[][] ROWS = makeN(ROW, 10); byte[][] QUALIFIERS = { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), @@ -497,10 +392,10 @@ public void testFilters() throws Exception { } } - @Test + @TestTemplate public void testFilterWithLongCompartor() throws Exception { - final TableName tableName = name.getTableName(); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { byte[][] ROWS = makeN(ROW, 10); byte[][] values = new byte[10][]; for (int i = 0; i < 10; i++) { @@ -529,10 +424,10 @@ public void testFilterWithLongCompartor() throws Exception { } } - @Test + @TestTemplate public void testKeyOnlyFilter() throws Exception { - final TableName tableName = name.getTableName(); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { + 
TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { byte[][] ROWS = makeN(ROW, 10); byte[][] QUALIFIERS = { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), @@ -566,10 +461,10 @@ public void testKeyOnlyFilter() throws Exception { /** * Test simple table and non-existent row cases. */ - @Test + @TestTemplate public void testSimpleMissing() throws Exception { - final TableName tableName = name.getTableName(); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { byte[][] ROWS = makeN(ROW, 4); // Try to get a row on an empty table @@ -677,15 +572,14 @@ public void testSimpleMissing() throws Exception { * Test basic puts, gets, scans, and deletes for a single row in a multiple family table. */ @SuppressWarnings("checkstyle:MethodLength") - @Test + @TestTemplate public void testSingleRowMultipleFamily() throws Exception { - final TableName tableName = name.getTableName(); byte[][] ROWS = makeN(ROW, 3); byte[][] FAMILIES = makeNAscii(FAMILY, 10); byte[][] QUALIFIERS = makeN(QUALIFIER, 10); byte[][] VALUES = makeN(VALUE, 10); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILIES)) { + TEST_UTIL.createTable(tableName, FAMILIES); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { //////////////////////////////////////////////////////////////////////////// // Insert one column to one family //////////////////////////////////////////////////////////////////////////// @@ -945,136 +839,113 @@ public void testSingleRowMultipleFamily() throws Exception { } } - @Test(expected = NullPointerException.class) + @TestTemplate public void testNullTableName() throws IOException { // Null table name (should NOT work) - TEST_UTIL.createTable(null, FAMILY); - fail("Creating a table with null name passed, should have failed"); + 
assertThrows(NullPointerException.class, () -> TEST_UTIL.createTable(null, FAMILY), + "Creating a table with null name passed, should have failed"); } - @Test(expected = IllegalArgumentException.class) + @TestTemplate public void testNullFamilyName() throws IOException { - final TableName tableName = name.getTableName(); - // Null family (should NOT work) - TEST_UTIL.createTable(tableName, new byte[][] { null }); - fail("Creating a table with a null family passed, should fail"); + assertThrows(IllegalArgumentException.class, + () -> TEST_UTIL.createTable(tableName, new byte[][] { null }), + "Creating a table with a null family passed, should fail"); } - @Test + @TestTemplate public void testNullRowAndQualifier() throws Exception { - final TableName tableName = name.getTableName(); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { // Null row (should NOT work) - try { + assertThrows(RuntimeException.class, () -> { Put put = new Put((byte[]) null); put.addColumn(FAMILY, QUALIFIER, VALUE); ht.put(put); - fail("Inserting a null row worked, should throw exception"); - } catch (Exception ignored) { - } + }, "Inserting a null row worked, should throw exception"); // Null qualifier (should work) - { - Put put = new Put(ROW); - put.addColumn(FAMILY, null, VALUE); - ht.put(put); + ht.put(new Put(ROW).addColumn(FAMILY, null, VALUE)); - getTestNull(ht, ROW, FAMILY, VALUE); + getTestNull(ht, ROW, FAMILY, VALUE); - scanTestNull(ht, ROW, FAMILY, VALUE); + scanTestNull(ht, ROW, FAMILY, VALUE); - Delete delete = new Delete(ROW); - delete.addColumns(FAMILY, null); - ht.delete(delete); + Delete delete = new Delete(ROW); + delete.addColumns(FAMILY, null); + ht.delete(delete); - Get get = new Get(ROW); - Result result = ht.get(get); - assertEmptyResult(result); - } + Get get = new Get(ROW); + Result result = ht.get(get); + 
assertEmptyResult(result); } } - @Test + @TestTemplate public void testNullEmptyQualifier() throws Exception { - final TableName tableName = name.getTableName(); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { // Empty qualifier, byte[0] instead of null (should work) - try { - Put put = new Put(ROW); - put.addColumn(FAMILY, HConstants.EMPTY_BYTE_ARRAY, VALUE); - ht.put(put); - - getTestNull(ht, ROW, FAMILY, VALUE); + Put put = new Put(ROW); + put.addColumn(FAMILY, HConstants.EMPTY_BYTE_ARRAY, VALUE); + ht.put(put); - scanTestNull(ht, ROW, FAMILY, VALUE); + getTestNull(ht, ROW, FAMILY, VALUE); - // Flush and try again + scanTestNull(ht, ROW, FAMILY, VALUE); - TEST_UTIL.flush(); + // Flush and try again - getTestNull(ht, ROW, FAMILY, VALUE); + TEST_UTIL.flush(); - scanTestNull(ht, ROW, FAMILY, VALUE); + getTestNull(ht, ROW, FAMILY, VALUE); - Delete delete = new Delete(ROW); - delete.addColumns(FAMILY, HConstants.EMPTY_BYTE_ARRAY); - ht.delete(delete); + scanTestNull(ht, ROW, FAMILY, VALUE); - Get get = new Get(ROW); - Result result = ht.get(get); - assertEmptyResult(result); + Delete delete = new Delete(ROW); + delete.addColumns(FAMILY, HConstants.EMPTY_BYTE_ARRAY); + ht.delete(delete); - } catch (Exception e) { - throw new IOException("Using a row with null qualifier should not throw exception"); - } + Get get = new Get(ROW); + Result result = ht.get(get); + assertEmptyResult(result); } } - @Test - public void testNullValue() throws IOException { - final TableName tableName = name.getTableName(); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { + @TestTemplate + public void testNullValue() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { // Null value - try { - Put put = new Put(ROW); - put.addColumn(FAMILY, 
QUALIFIER, null); - ht.put(put); - - Get get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - Result result = ht.get(get); - assertSingleResult(result, ROW, FAMILY, QUALIFIER, null); + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, null); + ht.put(put); - Scan scan = new Scan(); - scan.addColumn(FAMILY, QUALIFIER); - result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROW, FAMILY, QUALIFIER, null); + Get get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + Result result = ht.get(get); + assertSingleResult(result, ROW, FAMILY, QUALIFIER, null); - Delete delete = new Delete(ROW); - delete.addColumns(FAMILY, QUALIFIER); - ht.delete(delete); + Scan scan = new Scan(); + scan.addColumn(FAMILY, QUALIFIER); + result = getSingleScanResult(ht, scan); + assertSingleResult(result, ROW, FAMILY, QUALIFIER, null); - get = new Get(ROW); - result = ht.get(get); - assertEmptyResult(result); + Delete delete = new Delete(ROW); + delete.addColumns(FAMILY, QUALIFIER); + ht.delete(delete); - } catch (Exception e) { - throw new IOException("Null values should be allowed, but threw exception"); - } + get = new Get(ROW); + result = ht.get(get); + assertEmptyResult(result); } } - @Test + @TestTemplate public void testNullQualifier() throws Exception { - final TableName tableName = name.getTableName(); - try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { - + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { // Work for Put Put put = new Put(ROW); put.addColumn(FAMILY, null, VALUE); @@ -1115,29 +986,27 @@ public void testNullQualifier() throws Exception { put = new Put(ROW); put.addColumn(FAMILY, null, Bytes.toBytes("checkAndPut")); table.put(put); - table.checkAndMutate(ROW, FAMILY).ifEquals(VALUE).thenPut(put); + table.checkAndMutate(CheckAndMutate.newBuilder(ROW).ifEquals(FAMILY, null, VALUE).build(put)); RowMutations mutate = new RowMutations(ROW); mutate.add(new 
Put(ROW).addColumn(FAMILY, null, Bytes.toBytes("checkAndMutate"))); - table.checkAndMutate(ROW, FAMILY).ifEquals(Bytes.toBytes("checkAndPut")).thenMutate(mutate); + table.checkAndMutate(CheckAndMutate.newBuilder(ROW) + .ifEquals(FAMILY, null, Bytes.toBytes("checkAndPut")).build(mutate)); delete = new Delete(ROW); delete.addColumns(FAMILY, null); - table.checkAndMutate(ROW, FAMILY).ifEquals(Bytes.toBytes("checkAndMutate")) - .thenDelete(delete); + table.checkAndMutate(CheckAndMutate.newBuilder(ROW) + .ifEquals(FAMILY, null, Bytes.toBytes("checkAndMutate")).build(delete)); } } - @Test + @TestTemplate @SuppressWarnings("checkstyle:MethodLength") public void testVersions() throws Exception { - final TableName tableName = name.getTableName(); - long[] STAMPS = makeStamps(20); byte[][] VALUES = makeNAscii(VALUE, 20); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 10)) { - + TEST_UTIL.createTable(tableName, FAMILY, 10); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { // Insert 4 versions of same column Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); @@ -1350,16 +1219,15 @@ public void testVersions() throws Exception { } } - @Test + @TestTemplate @SuppressWarnings("checkstyle:MethodLength") public void testVersionLimits() throws Exception { - final TableName tableName = name.getTableName(); byte[][] FAMILIES = makeNAscii(FAMILY, 3); int[] LIMITS = { 1, 3, 5 }; long[] STAMPS = makeStamps(10); byte[][] VALUES = makeNAscii(VALUE, 10); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILIES, LIMITS)) { - + TEST_UTIL.createTable(tableName, FAMILIES, LIMITS); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { // Insert limit + 1 on each family Put put = new Put(ROW); put.addColumn(FAMILIES[0], QUALIFIER, STAMPS[0], VALUES[0]); @@ -1482,7 +1350,7 @@ public void testVersionLimits() throws Exception { get = new Get(ROW); get.readVersions(Integer.MAX_VALUE); result = 
ht.get(get); - assertEquals("Expected 9 keys but received " + result.size(), 9, result.size()); + assertEquals(9, result.size(), "Expected 9 keys but received " + result.size()); get = new Get(ROW); get.addFamily(FAMILIES[0]); @@ -1490,7 +1358,7 @@ public void testVersionLimits() throws Exception { get.addFamily(FAMILIES[2]); get.readVersions(Integer.MAX_VALUE); result = ht.get(get); - assertEquals("Expected 9 keys but received " + result.size(), 9, result.size()); + assertEquals(9, result.size(), "Expected 9 keys but received " + result.size()); get = new Get(ROW); get.addColumn(FAMILIES[0], QUALIFIER); @@ -1498,12 +1366,12 @@ public void testVersionLimits() throws Exception { get.addColumn(FAMILIES[2], QUALIFIER); get.readVersions(Integer.MAX_VALUE); result = ht.get(get); - assertEquals("Expected 9 keys but received " + result.size(), 9, result.size()); + assertEquals(9, result.size(), "Expected 9 keys but received " + result.size()); scan = new Scan().withStartRow(ROW); scan.readVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertEquals("Expected 9 keys but received " + result.size(), 9, result.size()); + assertEquals(9, result.size(), "Expected 9 keys but received " + result.size()); scan = new Scan().withStartRow(ROW); scan.readVersions(Integer.MAX_VALUE); @@ -1511,7 +1379,7 @@ public void testVersionLimits() throws Exception { scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); result = getSingleScanResult(ht, scan); - assertEquals("Expected 9 keys but received " + result.size(), 9, result.size()); + assertEquals(9, result.size(), "Expected 9 keys but received " + result.size()); scan = new Scan().withStartRow(ROW); scan.readVersions(Integer.MAX_VALUE); @@ -1519,59 +1387,54 @@ public void testVersionLimits() throws Exception { scan.addColumn(FAMILIES[1], QUALIFIER); scan.addColumn(FAMILIES[2], QUALIFIER); result = getSingleScanResult(ht, scan); - assertEquals("Expected 9 keys but received " + result.size(), 9, result.size()); + 
assertEquals(9, result.size(), "Expected 9 keys but received " + result.size()); } } - @Test + @TestTemplate public void testDeleteFamilyVersion() throws Exception { - try (Admin admin = TEST_UTIL.getAdmin()) { - final TableName tableName = name.getTableName(); - - byte[][] QUALIFIERS = makeNAscii(QUALIFIER, 1); - byte[][] VALUES = makeN(VALUE, 5); - long[] ts = { 1000, 2000, 3000, 4000, 5000 }; + TEST_UTIL.createTable(tableName, FAMILY, 5); + byte[][] QUALIFIERS = makeNAscii(QUALIFIER, 1); + byte[][] VALUES = makeN(VALUE, 5); + long[] ts = { 1000, 2000, 3000, 4000, 5000 }; + try (Connection conn = getConnection(); Admin admin = conn.getAdmin(); + Table ht = conn.getTable(tableName)) { + Put put = new Put(ROW); + for (int q = 0; q < 1; q++) { + for (int t = 0; t < 5; t++) { + put.addColumn(FAMILY, QUALIFIERS[q], ts[t], VALUES[t]); + } + } + ht.put(put); + admin.flush(tableName); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 5)) { + Delete delete = new Delete(ROW); + delete.addFamilyVersion(FAMILY, ts[1]); // delete version '2000' + delete.addFamilyVersion(FAMILY, ts[3]); // delete version '4000' + ht.delete(delete); + admin.flush(tableName); - Put put = new Put(ROW); - for (int q = 0; q < 1; q++) { - for (int t = 0; t < 5; t++) { - put.addColumn(FAMILY, QUALIFIERS[q], ts[t], VALUES[t]); - } - } - ht.put(put); - admin.flush(tableName); - - Delete delete = new Delete(ROW); - delete.addFamilyVersion(FAMILY, ts[1]); // delete version '2000' - delete.addFamilyVersion(FAMILY, ts[3]); // delete version '4000' - ht.delete(delete); - admin.flush(tableName); - - for (int i = 0; i < 1; i++) { - Get get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIERS[i]); - get.readVersions(Integer.MAX_VALUE); - Result result = ht.get(get); - // verify version '1000'/'3000'/'5000' remains for all columns - assertNResult(result, ROW, FAMILY, QUALIFIERS[i], new long[] { ts[0], ts[2], ts[4] }, - new byte[][] { VALUES[0], VALUES[2], VALUES[4] }, 0, 2); - } + for (int i = 0; i < 1; 
i++) { + Get get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIERS[i]); + get.readVersions(Integer.MAX_VALUE); + Result result = ht.get(get); + // verify version '1000'/'3000'/'5000' remains for all columns + assertNResult(result, ROW, FAMILY, QUALIFIERS[i], new long[] { ts[0], ts[2], ts[4] }, + new byte[][] { VALUES[0], VALUES[2], VALUES[4] }, 0, 2); } } } - @Test + @TestTemplate public void testDeleteFamilyVersionWithOtherDeletes() throws Exception { - final TableName tableName = name.getTableName(); - + TEST_UTIL.createTable(tableName, FAMILY, 5); byte[][] QUALIFIERS = makeNAscii(QUALIFIER, 5); byte[][] VALUES = makeN(VALUE, 5); long[] ts = { 1000, 2000, 3000, 4000, 5000 }; - try (Admin admin = TEST_UTIL.getAdmin(); - Table ht = TEST_UTIL.createTable(tableName, FAMILY, 5)) { + try (Connection conn = getConnection(); Admin admin = conn.getAdmin(); + Table ht = conn.getTable(tableName)) { Put put; Result result; Get get; @@ -1670,15 +1533,13 @@ public void testDeleteFamilyVersionWithOtherDeletes() throws Exception { } } - @Test + @TestTemplate public void testDeleteWithFailed() throws Exception { - final TableName tableName = name.getTableName(); - byte[][] FAMILIES = makeNAscii(FAMILY, 3); byte[][] VALUES = makeN(VALUE, 5); long[] ts = { 1000, 2000, 3000, 4000, 5000 }; - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILIES, 3)) { + TEST_UTIL.createTable(tableName, FAMILIES, 3); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { Put put = new Put(ROW); put.addColumn(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]); ht.put(put); @@ -1696,18 +1557,15 @@ public void testDeleteWithFailed() throws Exception { } } - @Test + @TestTemplate @SuppressWarnings("checkstyle:MethodLength") public void testDeletes() throws Exception { - final TableName tableName = name.getTableName(); - byte[][] ROWS = makeNAscii(ROW, 6); byte[][] FAMILIES = makeNAscii(FAMILY, 3); byte[][] VALUES = makeN(VALUE, 5); long[] ts = { 1000, 2000, 3000, 4000, 5000 }; - - 
try (Table ht = TEST_UTIL.createTable(tableName, FAMILIES, 3)) { - + TEST_UTIL.createTable(tableName, FAMILIES, 3); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { Put put = new Put(ROW); put.addColumn(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]); put.addColumn(FAMILIES[0], QUALIFIER, ts[1], VALUES[1]); @@ -1825,8 +1683,8 @@ public void testDeletes() throws Exception { get.addFamily(FAMILIES[2]); get.readVersions(Integer.MAX_VALUE); result = ht.get(get); - assertEquals("Expected 4 key but received " + result.size() + ": " + result, 4, - result.size()); + assertEquals(4, result.size(), + "Expected 4 key but received " + result.size() + ": " + result); delete = new Delete(ROWS[0]); delete.addFamily(FAMILIES[2]); @@ -1847,7 +1705,7 @@ public void testDeletes() throws Exception { get.addFamily(FAMILIES[2]); get.readVersions(Integer.MAX_VALUE); result = ht.get(get); - assertEquals("Expected 2 keys but received " + result.size(), 2, result.size()); + assertEquals(2, result.size(), "Expected 2 keys but received " + result.size()); assertNResult(result, ROWS[0], FAMILIES[1], QUALIFIER, new long[] { ts[0], ts[1] }, new byte[][] { VALUES[0], VALUES[1] }, 0, 1); @@ -1856,7 +1714,7 @@ public void testDeletes() throws Exception { scan.addFamily(FAMILIES[2]); scan.readVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertEquals("Expected 2 keys but received " + result.size(), 2, result.size()); + assertEquals(2, result.size(), "Expected 2 keys but received " + result.size()); assertNResult(result, ROWS[0], FAMILIES[1], QUALIFIER, new long[] { ts[0], ts[1] }, new byte[][] { VALUES[0], VALUES[1] }, 0, 1); @@ -1865,14 +1723,14 @@ public void testDeletes() throws Exception { get.addFamily(FAMILIES[2]); get.readVersions(Integer.MAX_VALUE); result = ht.get(get); - assertEquals("Expected 2 keys but received " + result.size(), 2, result.size()); + assertEquals(2, result.size(), "Expected 2 keys but received " + result.size()); scan = 
new Scan().withStartRow(ROWS[1]); scan.addFamily(FAMILIES[1]); scan.addFamily(FAMILIES[2]); scan.readVersions(Integer.MAX_VALUE); result = getSingleScanResult(ht, scan); - assertEquals("Expected 2 keys but received " + result.size(), 2, result.size()); + assertEquals(2, result.size(), "Expected 2 keys but received " + result.size()); get = new Get(ROWS[2]); get.addFamily(FAMILIES[1]); @@ -1912,14 +1770,14 @@ public void testDeletes() throws Exception { get.addFamily(FAMILIES[2]); get.readVersions(Integer.MAX_VALUE); result = ht.get(get); - assertEquals("Expected 1 key but received " + result.size(), 1, result.size()); + assertEquals(1, result.size(), "Expected 1 key but received " + result.size()); get = new Get(ROWS[4]); get.addFamily(FAMILIES[1]); get.addFamily(FAMILIES[2]); get.readVersions(Integer.MAX_VALUE); result = ht.get(get); - assertEquals("Expected 2 keys but received " + result.size(), 2, result.size()); + assertEquals(2, result.size(), "Expected 2 keys but received " + result.size()); scan = new Scan().withStartRow(ROWS[3]); scan.addFamily(FAMILIES[1]); @@ -1927,11 +1785,11 @@ public void testDeletes() throws Exception { scan.readVersions(Integer.MAX_VALUE); ResultScanner scanner = ht.getScanner(scan); result = scanner.next(); - assertEquals("Expected 1 key but received " + result.size(), 1, result.size()); + assertEquals(1, result.size(), "Expected 1 key but received " + result.size()); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]), ROWS[3])); assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[0]), VALUES[0])); result = scanner.next(); - assertEquals("Expected 2 keys but received " + result.size(), 2, result.size()); + assertEquals(2, result.size(), "Expected 2 keys but received " + result.size()); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]), ROWS[4])); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[1]), ROWS[4])); assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[0]), 
VALUES[1])); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSide3TestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest3.java similarity index 85% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSide3TestBase.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest3.java index 518fe56bac49..741f8ffe6a76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSide3TestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest3.java @@ -17,9 +17,12 @@ */ package org.apache.hadoop.hbase.client; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -41,7 +44,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.Coprocessor; -import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionMetrics; @@ -63,66 +65,39 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInfo; +import org.junit.jupiter.api.TestTemplate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
org.apache.hbase.thirdparty.com.google.common.collect.Iterables; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos; -public class FromClientSide3TestBase { +public class FromClientSideTest3 extends FromClientSideTestBase { + + protected FromClientSideTest3(Class registryImpl, + int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } - private static final Logger LOG = LoggerFactory.getLogger(FromClientSide3TestBase.class); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static final Logger LOG = LoggerFactory.getLogger(FromClientSideTest3.class); private static int WAITTABLE_MILLIS; - private static byte[] FAMILY; - private static int SLAVES; - private static byte[] ROW; private static byte[] ANOTHERROW; - private static byte[] QUALIFIER; - private static byte[] VALUE; private static byte[] COL_QUAL; private static byte[] VAL_BYTES; private static byte[] ROW_BYTES; - private TableName tableName; - - protected static void startCluster() throws Exception { + protected static void startCluster(Class... 
cps) throws Exception { WAITTABLE_MILLIS = 10000; - FAMILY = Bytes.toBytes("testFamily"); SLAVES = 3; - ROW = Bytes.toBytes("testRow"); ANOTHERROW = Bytes.toBytes("anotherrow"); - QUALIFIER = Bytes.toBytes("testQualifier"); - VALUE = Bytes.toBytes("testValue"); COL_QUAL = Bytes.toBytes("f1"); VAL_BYTES = Bytes.toBytes("v1"); ROW_BYTES = Bytes.toBytes("r1"); - TEST_UTIL.startMiniCluster(SLAVES); - } - - @AfterAll - public static void shutdownCluster() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @BeforeEach - public void setUp(TestInfo testInfo) throws Exception { - tableName = TableName.valueOf(testInfo.getTestMethod().get().getName()); - } - - @AfterEach - public void tearDown() throws Exception { - for (TableDescriptor htd : TEST_UTIL.getAdmin().listTableDescriptors()) { - LOG.info("Tear down, remove table=" + htd.getTableName()); - TEST_UTIL.deleteTable(htd.getTableName()); - } + initialize(cps); } private void randomCFPuts(Table table, byte[] row, byte[] family, int nPuts) throws Exception { @@ -156,9 +131,11 @@ private static List toList(ResultScanner scanner) { } } - @Test + @TestTemplate public void testScanAfterDeletingSpecifiedRow() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); byte[] row = Bytes.toBytes("SpecifiedRow"); byte[] value0 = Bytes.toBytes("value_0"); @@ -199,10 +176,11 @@ public void testScanAfterDeletingSpecifiedRow() throws IOException, InterruptedE } } - @Test + @TestTemplate public void testScanAfterDeletingSpecifiedRowV2() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - 
TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { byte[] row = Bytes.toBytes("SpecifiedRow"); byte[] qual0 = Bytes.toBytes("qual0"); byte[] qual1 = Bytes.toBytes("qual1"); @@ -248,7 +226,7 @@ private int getStoreFileCount(Admin admin, ServerName serverName, RegionInfo reg } // override the config settings at the CF level and ensure priority - @Test + @TestTemplate public void testAdvancedConfigOverride() throws Exception { /* * Overall idea: (1) create 3 store files and issue a compaction. config's compaction.min == 3, @@ -258,11 +236,10 @@ public void testAdvancedConfigOverride() throws Exception { * table. The CF schema should override the Table schema and now cause a minor compaction. */ TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3); - - try (Table table = TEST_UTIL.createTable(tableName, FAMILY, 10)) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - Admin admin = TEST_UTIL.getAdmin(); - + TEST_UTIL.createTable(tableName, FAMILY, 10); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName); + Admin admin = conn.getAdmin()) { // Create 3 store files. 
byte[] row = Bytes.toBytes(ThreadLocalRandom.current().nextInt()); performMultiplePutAndFlush(admin, table, row, FAMILY, 3, 100); @@ -352,10 +329,11 @@ public void testAdvancedConfigOverride() throws Exception { } } - @Test + @TestTemplate public void testHTableBatchWithEmptyPut() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { List actions = new ArrayList<>(); Object[] results = new Object[2]; // create an empty Put @@ -374,15 +352,15 @@ public void testHTableBatchWithEmptyPut() throws IOException, InterruptedExcepti // Test Table.batch with large amount of mutations against the same key. // It used to trigger read lock's "Maximum lock count exceeded" Error. 
- @Test + @TestTemplate public void testHTableWithLargeBatch() throws IOException, InterruptedException { int sixtyFourK = 64 * 1024; List actions = new ArrayList<>(); Object[] results = new Object[(sixtyFourK + 1) * 2]; - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { for (int i = 0; i < sixtyFourK + 1; i++) { Put put1 = new Put(ROW); put1.addColumn(FAMILY, QUALIFIER, VALUE); @@ -397,13 +375,13 @@ public void testHTableWithLargeBatch() throws IOException, InterruptedException } } - @Test + @TestTemplate public void testBatchWithRowMutation() throws Exception { LOG.info("Starting testBatchWithRowMutation"); byte[][] QUALIFIERS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b") }; - - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { RowMutations arm = RowMutations .of(Collections.singletonList(new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE))); @@ -422,21 +400,20 @@ public void testBatchWithRowMutation() throws Exception { assertNull(r.getValue(FAMILY, QUALIFIERS[0])); // Test that we get the correct remote exception for RowMutations from batch() - try { - arm = RowMutations.of(Collections.singletonList( + RetriesExhaustedException e = assertThrows(RetriesExhaustedException.class, () -> { + RowMutations m = RowMutations.of(Collections.singletonList( new Put(ROW).addColumn(new byte[] { 'b', 'o', 'g', 'u', 's' }, QUALIFIERS[0], VALUE))); - table.batch(Arrays.asList(arm), 
batchResult); - fail("Expected RetriesExhaustedWithDetailsException with NoSuchColumnFamilyException"); - } catch (RetriesExhaustedException e) { - String msg = e.getMessage(); - assertTrue(msg.contains("NoSuchColumnFamilyException")); - } + table.batch(Arrays.asList(m), batchResult); + }, "Expected RetriesExhaustedWithDetailsException with NoSuchColumnFamilyException"); + assertThat(e.getMessage(), containsString("NoSuchColumnFamilyException")); } } - @Test + @TestTemplate public void testBatchWithCheckAndMutate() throws Exception { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { byte[] row1 = Bytes.toBytes("row1"); byte[] row2 = Bytes.toBytes("row2"); byte[] row3 = Bytes.toBytes("row3"); @@ -540,12 +517,12 @@ public void testBatchWithCheckAndMutate() throws Exception { } } - @Test + @TestTemplate public void testHTableExistsMethodSingleRegionSingleGet() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { // Test with a single region table. 
Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); @@ -562,12 +539,12 @@ public void testHTableExistsMethodSingleRegionSingleGet() } } - @Test + @TestTemplate public void testHTableExistsMethodSingleRegionMultipleGets() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); table.put(put); @@ -582,11 +559,11 @@ public void testHTableExistsMethodSingleRegionMultipleGets() } } - @Test + @TestTemplate public void testHTableExistsBeforeGet() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); table.put(put); @@ -602,11 +579,11 @@ public void testHTableExistsBeforeGet() throws IOException, InterruptedException } } - @Test + @TestTemplate public void testHTableExistsAllBeforeGet() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { final byte[] ROW2 = Bytes.add(ROW, Bytes.toBytes("2")); Put put = new Put(ROW); 
put.addColumn(FAMILY, QUALIFIER, VALUE); @@ -633,12 +610,12 @@ public void testHTableExistsAllBeforeGet() throws IOException, InterruptedExcept } } - @Test + @TestTemplate public void testHTableExistsMethodMultipleRegionsSingleGet() throws Exception { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 1, - new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255)) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 1, new byte[] { 0x00 }, + new byte[] { (byte) 0xff }, 255); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); @@ -654,12 +631,12 @@ public void testHTableExistsMethodMultipleRegionsSingleGet() throws Exception { } } - @Test + @TestTemplate public void testHTableExistsMethodMultipleRegionsMultipleGets() throws Exception { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 1, - new byte[] { 0x00 }, new byte[] { (byte) 0xff }, 255)) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 1, new byte[] { 0x00 }, + new byte[] { (byte) 0xff }, 255); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); table.put(put); @@ -705,12 +682,12 @@ public void testHTableExistsMethodMultipleRegionsMultipleGets() throws Exception } } - @Test + @TestTemplate public void testGetEmptyRow() throws Exception { // Create a table and put in 1 row - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + 
TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { Put put = new Put(ROW_BYTES); put.addColumn(FAMILY, COL_QUAL, VAL_BYTES); table.put(put); @@ -731,7 +708,7 @@ public void testGetEmptyRow() throws Exception { } } - @Test + @TestTemplate public void testConnectionDefaultUsesCodec() throws Exception { try ( RpcClient client = RpcClientFactory.createClient(TEST_UTIL.getConfiguration(), "cluster")) { @@ -739,10 +716,10 @@ public void testConnectionDefaultUsesCodec() throws Exception { } } - @Test + @TestTemplate public void testPutWithPreBatchMutate() throws Exception { testPreBatchMutate(tableName, () -> { - try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); t.put(put); @@ -752,10 +729,10 @@ public void testPutWithPreBatchMutate() throws Exception { }); } - @Test + @TestTemplate public void testRowMutationsWithPreBatchMutate() throws Exception { testPreBatchMutate(tableName, () -> { - try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { RowMutations rm = new RowMutations(ROW, 1); Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); @@ -771,36 +748,37 @@ private void testPreBatchMutate(TableName tableName, Runnable rn) throws Excepti TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) .setCoprocessor(WaitingForScanObserver.class.getName()).build(); - TEST_UTIL.getAdmin().createTable(tableDescriptor); - // Don't use waitTableAvailable(), because the scanner will mess up the co-processor - - ExecutorService service = Executors.newFixedThreadPool(2); - service.execute(rn); - final List cells = new ArrayList<>(); - service.execute(() -> { - 
try { - // waiting for update. - TimeUnit.SECONDS.sleep(3); - try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { - Scan scan = new Scan(); - try (ResultScanner scanner = t.getScanner(scan)) { - for (Result r : scanner) { - cells.addAll(Arrays.asList(r.rawCells())); + try (Connection conn = getConnection(); Admin admin = conn.getAdmin()) { + admin.createTable(tableDescriptor); + // Don't use waitTableAvailable(), because the scanner will mess up the co-processor + + ExecutorService service = Executors.newFixedThreadPool(2); + service.execute(rn); + final List cells = new ArrayList<>(); + service.execute(() -> { + try { + // waiting for update. + TimeUnit.SECONDS.sleep(3); + try (Table t = conn.getTable(tableName)) { + Scan scan = new Scan(); + try (ResultScanner scanner = t.getScanner(scan)) { + for (Result r : scanner) { + cells.addAll(Arrays.asList(r.rawCells())); + } } } + } catch (IOException | InterruptedException ex) { + throw new RuntimeException(ex); } - } catch (IOException | InterruptedException ex) { - throw new RuntimeException(ex); - } - }); - service.shutdown(); - service.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); - assertEquals(0, cells.size(), "The write is blocking by RegionObserver#postBatchMutate" - + ", so the data is invisible to reader"); - TEST_UTIL.deleteTable(tableName); + }); + service.shutdown(); + service.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); + assertEquals(0, cells.size(), "The write is blocking by RegionObserver#postBatchMutate" + + ", so the data is invisible to reader"); + } } - @Test + @TestTemplate public void testLockLeakWithDelta() throws Exception, Throwable { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) @@ -810,7 +788,7 @@ public void testLockLeakWithDelta() throws Exception, Throwable { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); // new a connection for lower retry number. 
- Configuration copy = new Configuration(TEST_UTIL.getConfiguration()); + Configuration copy = getClientConf(); copy.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); try (Connection con = ConnectionFactory.createConnection(copy)) { HRegion region = (HRegion) find(tableName); @@ -855,7 +833,7 @@ public void testLockLeakWithDelta() throws Exception, Throwable { assertEquals(0, readLockCount); } - @Test + @TestTemplate public void testMultiRowMutations() throws Exception, Throwable { TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)) @@ -866,7 +844,7 @@ public void testMultiRowMutations() throws Exception, Throwable { TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); // new a connection for lower retry number. - Configuration copy = new Configuration(TEST_UTIL.getConfiguration()); + Configuration copy = getClientConf(); copy.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); try (Connection con = ConnectionFactory.createConnection(copy)) { byte[] row = Bytes.toBytes("ROW-0"); @@ -953,10 +931,11 @@ public void testMultiRowMutations() throws Exception, Throwable { * disobey the multi version concurrency control rules. This test case is to reproduce this * scenario. 
*/ - @Test + @TestTemplate public void testMVCCUsingMVCCPreAssign() throws IOException, InterruptedException { - try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY })) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { // put two row first to init the scanner Put put = new Put(Bytes.toBytes("0")); put.addColumn(FAMILY, Bytes.toBytes(""), Bytes.toBytes("0")); @@ -967,40 +946,38 @@ public void testMVCCUsingMVCCPreAssign() throws IOException, InterruptedExceptio Scan scan = new Scan(); scan.setTimeRange(0, Long.MAX_VALUE); scan.setCaching(1); - ResultScanner scanner = table.getScanner(scan); - int rowNum = scanner.next() != null ? 1 : 0; - // the started scanner shouldn't see the rows put below - for (int i = 1; i < 1000; i++) { - put = new Put(Bytes.toBytes(String.valueOf(i))); - put.setDurability(Durability.ASYNC_WAL); - put.addColumn(FAMILY, Bytes.toBytes(""), Bytes.toBytes(i)); - table.put(put); - } - for (Result result : scanner) { - rowNum++; + try (ResultScanner scanner = table.getScanner(scan)) { + int rowNum = scanner.next() != null ? 
1 : 0; + // the started scanner shouldn't see the rows put below + for (int i = 1; i < 1000; i++) { + put = new Put(Bytes.toBytes(String.valueOf(i))); + put.setDurability(Durability.ASYNC_WAL); + put.addColumn(FAMILY, Bytes.toBytes(""), Bytes.toBytes(i)); + table.put(put); + } + rowNum += Iterables.size(scanner); + // scanner should only see two rows + assertEquals(2, rowNum); } - // scanner should only see two rows - assertEquals(2, rowNum); - scanner = table.getScanner(scan); - rowNum = 0; - for (Result result : scanner) { - rowNum++; + try (ResultScanner scanner = table.getScanner(scan)) { + int rowNum = Iterables.size(scanner); + // the new scanner should see all rows + assertEquals(1001, rowNum); } - // the new scanner should see all rows - assertEquals(1001, rowNum); } + } - @Test + @TestTemplate public void testPutThenGetWithMultipleThreads() throws Exception { final int THREAD_NUM = 20; final int ROUND_NUM = 10; for (int round = 0; round < ROUND_NUM; round++) { ArrayList threads = new ArrayList<>(THREAD_NUM); final AtomicInteger successCnt = new AtomicInteger(0); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); - + TEST_UTIL.createTable(tableName, FAMILY); + TEST_UTIL.waitTableAvailable(tableName, WAITTABLE_MILLIS); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { for (int i = 0; i < THREAD_NUM; i++) { final int index = i; Thread t = new Thread(new Runnable() { @@ -1037,8 +1014,9 @@ public void run() { t.join(); } assertEquals(THREAD_NUM, successCnt.get(), "Not equal in round " + round); + } finally { + TEST_UTIL.deleteTable(tableName); } - TEST_UTIL.deleteTable(tableName); } } @@ -1121,7 +1099,7 @@ static byte[] generateHugeValue(int size) { return value; } - @Test + @TestTemplate public void testScanWithBatchSizeReturnIncompleteCells() throws IOException, InterruptedException { TableDescriptor hd = TableDescriptorBuilder.newBuilder(tableName) diff 
--git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest4.java new file mode 100644 index 000000000000..e00d049bc6a7 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest4.java @@ -0,0 +1,1173 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.NavigableMap; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Threads; +import org.junit.jupiter.api.TestTemplate; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; + +/** + * Run tests that use the HBase clients; {@link Table}. Sets up the HBase mini cluster once at start + * and runs through all client tests. Each creates a table named for the method and does its stuff + * against that. Parameterized to run with different registry implementations. 
+ */ +public class FromClientSideTest4 extends FromClientSideTestBase { + + private static final Logger LOG = LoggerFactory.getLogger(FromClientSideTest4.class); + + protected FromClientSideTest4(Class registryImpl, + int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } + + /** + * Test batch operations with combination of valid and invalid args + */ + @TestTemplate + public void testBatchOperationsWithErrors() throws Exception { + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 10); + try (Connection conn = getConnection(); Table foo = conn.getTable(tableName)) { + int NUM_OPS = 100; + + // 1.1 Put with no column families (local validation, runtime exception) + List puts = new ArrayList<>(NUM_OPS); + for (int i = 0; i != NUM_OPS; i++) { + Put put = new Put(Bytes.toBytes(i)); + puts.add(put); + } + assertThrows(IllegalArgumentException.class, () -> foo.put(puts)); + assertEquals(NUM_OPS, puts.size()); + + // 1.2 Put with invalid column family + puts.clear(); + for (int i = 0; i < NUM_OPS; i++) { + Put put = new Put(Bytes.toBytes(i)); + put.addColumn((i % 2) == 0 ? FAMILY : INVALID_FAMILY, FAMILY, Bytes.toBytes(i)); + puts.add(put); + } + + RetriesExhaustedException ree = + assertThrows(RetriesExhaustedException.class, () -> foo.put(puts)); + assertThat(ree.getCause(), instanceOf(NoSuchColumnFamilyException.class)); + + // 2.1 Get non-existent rows + List gets = new ArrayList<>(NUM_OPS); + for (int i = 0; i < NUM_OPS; i++) { + Get get = new Get(Bytes.toBytes(i)); + gets.add(get); + } + Result[] getsResult = foo.get(gets); + assertNotNull(getsResult); + assertEquals(NUM_OPS, getsResult.length); + for (int i = 0; i < NUM_OPS; i++) { + Result getResult = getsResult[i]; + if (i % 2 == 0) { + assertFalse(getResult.isEmpty()); + } else { + assertTrue(getResult.isEmpty()); + } + } + + // 2.2 Get with invalid column family + gets.clear(); + for (int i = 0; i < NUM_OPS; i++) { + Get get = new Get(Bytes.toBytes(i)); + get.addColumn((i % 2) == 0 ? 
FAMILY : INVALID_FAMILY, FAMILY); + gets.add(get); + } + ree = assertThrows(RetriesExhaustedException.class, () -> foo.get(gets)); + assertThat(ree.getCause(), instanceOf(NoSuchColumnFamilyException.class)); + + // 3.1 Delete with invalid column family + List deletes = new ArrayList<>(NUM_OPS); + for (int i = 0; i < NUM_OPS; i++) { + Delete delete = new Delete(Bytes.toBytes(i)); + delete.addColumn((i % 2) == 0 ? FAMILY : INVALID_FAMILY, FAMILY); + deletes.add(delete); + } + ree = assertThrows(RetriesExhaustedException.class, () -> foo.delete(deletes)); + assertThat(ree.getCause(), instanceOf(NoSuchColumnFamilyException.class)); + + // all valid rows should have been deleted + gets.clear(); + for (int i = 0; i < NUM_OPS; i++) { + Get get = new Get(Bytes.toBytes(i)); + gets.add(get); + } + getsResult = foo.get(gets); + assertNotNull(getsResult); + assertEquals(NUM_OPS, getsResult.length); + for (Result getResult : getsResult) { + assertTrue(getResult.isEmpty()); + } + + // 3.2 Delete non-existent rows + deletes.clear(); + for (int i = 0; i < NUM_OPS; i++) { + Delete delete = new Delete(Bytes.toBytes(i)); + deletes.add(delete); + } + foo.delete(deletes); + } + } + + // + // JIRA Testers + // + + /** + * HBASE-867 If millions of columns in a column family, hbase scanner won't come up Test will + * create numRows rows, each with numColsPerRow columns (1 version each), and attempt to scan them + * all. 
To test at scale, up numColsPerRow to the millions (have not gotten that to work running + * as junit though) + */ + @TestTemplate + public void testJiraTest867() throws Exception { + int numRows = 10; + int numColsPerRow = 2000; + + byte[][] ROWS = makeN(ROW, numRows); + byte[][] QUALIFIERS = makeN(QUALIFIER, numColsPerRow); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + // Insert rows + for (int i = 0; i < numRows; i++) { + Put put = new Put(ROWS[i]); + put.setDurability(Durability.SKIP_WAL); + for (int j = 0; j < numColsPerRow; j++) { + put.addColumn(FAMILY, QUALIFIERS[j], QUALIFIERS[j]); + } + assertEquals(put.size(), numColsPerRow, "Put expected to contain " + numColsPerRow + + " columns but " + "only contains " + put.size()); + ht.put(put); + } + + // Get a row + Get get = new Get(ROWS[numRows - 1]); + Result result = ht.get(get); + assertNumKeys(result, numColsPerRow); + Cell[] keys = result.rawCells(); + for (int i = 0; i < result.size(); i++) { + assertKey(keys[i], ROWS[numRows - 1], FAMILY, QUALIFIERS[i], QUALIFIERS[i]); + } + + // Scan the rows + Scan scan = new Scan(); + try (ResultScanner scanner = ht.getScanner(scan)) { + int rowCount = 0; + while ((result = scanner.next()) != null) { + assertNumKeys(result, numColsPerRow); + Cell[] kvs = result.rawCells(); + for (int i = 0; i < numColsPerRow; i++) { + assertKey(kvs[i], ROWS[rowCount], FAMILY, QUALIFIERS[i], QUALIFIERS[i]); + } + rowCount++; + } + assertEquals(rowCount, numRows, + "Expected to scan " + numRows + " rows but actually scanned " + rowCount + " rows"); + } + + // flush and try again + + TEST_UTIL.flush(); + + // Get a row + get = new Get(ROWS[numRows - 1]); + result = ht.get(get); + assertNumKeys(result, numColsPerRow); + keys = result.rawCells(); + for (int i = 0; i < result.size(); i++) { + assertKey(keys[i], ROWS[numRows - 1], FAMILY, QUALIFIERS[i], QUALIFIERS[i]); + } + + // Scan the rows + scan = new 
Scan(); + try (ResultScanner scanner = ht.getScanner(scan)) { + int rowCount = 0; + while ((result = scanner.next()) != null) { + assertNumKeys(result, numColsPerRow); + Cell[] kvs = result.rawCells(); + for (int i = 0; i < numColsPerRow; i++) { + assertKey(kvs[i], ROWS[rowCount], FAMILY, QUALIFIERS[i], QUALIFIERS[i]); + } + rowCount++; + } + assertEquals(rowCount, numRows, + "Expected to scan " + numRows + " rows but actually scanned " + rowCount + " rows"); + } + } + } + + /** + * HBASE-861 get with timestamp will return a value if there is a version with an earlier + * timestamp + */ + @TestTemplate + public void testJiraTest861() throws Exception { + byte[][] VALUES = makeNAscii(VALUE, 7); + long[] STAMPS = makeStamps(7); + TEST_UTIL.createTable(tableName, FAMILY, 10); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + // Insert three versions + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + ht.put(put); + + // Get the middle value + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + + // Try to get one version before (expect fail) + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[1]); + + // Try to get one version after (expect fail) + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[5]); + + // Try same from storefile + TEST_UTIL.flush(); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[1]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[5]); + + // Insert two more versions surrounding others, into memstore + put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[6], VALUES[6]); + ht.put(put); + + // Check we can get everything we should and can't get what we 
shouldn't + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[1]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[5]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[6], VALUES[6]); + + // Try same from two storefiles + TEST_UTIL.flush(); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[1]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[5]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[6], VALUES[6]); + } + } + + /** + * HBASE-33 Add a HTable get/obtainScanner method that retrieves all versions of a particular + * column and row between two timestamps + */ + @TestTemplate + public void testJiraTest33() throws Exception { + byte[][] VALUES = makeNAscii(VALUE, 7); + long[] STAMPS = makeStamps(7); + TEST_UTIL.createTable(tableName, FAMILY, 10); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + // Insert lots versions + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); + ht.put(put); + + getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + 
getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 2); + getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); + getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 3); + + scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 2); + scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); + scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 3); + + // Try same from storefile + TEST_UTIL.flush(); + + getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 2); + getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); + getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 3); + + scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 2); + scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); + scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 3); + } + } + + /** + * HBASE-1014 commit(BatchUpdate) method should return timestamp + */ + @TestTemplate + public void testJiraTest1014() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY, 10); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + long manualStamp = 12345; + + // Insert lots versions + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, manualStamp, VALUE); + ht.put(put); + + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, manualStamp, VALUE); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, manualStamp - 1); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, manualStamp + 1); + } + } + + /** + * HBASE-1182 Scan for columns > some timestamp + */ + @TestTemplate + public void 
testJiraTest1182() throws Exception { + byte[][] VALUES = makeNAscii(VALUE, 7); + long[] STAMPS = makeStamps(7); + TEST_UTIL.createTable(tableName, FAMILY, 10); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + // Insert lots versions + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); + ht.put(put); + + getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); + getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); + + scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); + scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); + + // Try same from storefile + TEST_UTIL.flush(); + + getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); + getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); + + scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); + scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); + } + } + + /** + * HBASE-52 Add a means of scanning over all versions + */ + @TestTemplate + public void testJiraTest52() throws Exception { + byte[][] VALUES = makeNAscii(VALUE, 7); + long[] STAMPS = 
makeStamps(7); + TEST_UTIL.createTable(tableName, FAMILY, 10); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + // Insert lots versions + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); + ht.put(put); + + getAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + + scanAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + + // Try same from storefile + TEST_UTIL.flush(); + + getAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + + scanAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); + } + } + + @TestTemplate + @SuppressWarnings("checkstyle:MethodLength") + public void testDuplicateVersions() throws Exception { + long[] STAMPS = makeStamps(20); + byte[][] VALUES = makeNAscii(VALUE, 20); + TEST_UTIL.createTable(tableName, FAMILY, 10); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + // Insert 4 versions of same column + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); + ht.put(put); + + // Verify we can get each one properly + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); + 
scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); + + // Verify we don't accidentally get others + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]); + scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); + scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); + scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]); + + // Ensure maxVersions in query is respected + Get get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(2); + Result result = ht.get(get); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[4], STAMPS[5] }, + new byte[][] { VALUES[4], VALUES[5] }, 0, 1); + + Scan scan = new Scan().withStartRow(ROW); + scan.addColumn(FAMILY, QUALIFIER); + scan.readVersions(2); + result = getSingleScanResult(ht, scan); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[4], STAMPS[5] }, + new byte[][] { VALUES[4], VALUES[5] }, 0, 1); + + // Flush and redo + + TEST_UTIL.flush(); + + // Verify we can get each one properly + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); + + // Verify we don't accidentally get others + 
getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]); + scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); + scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); + scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]); + + // Ensure maxVersions in query is respected + get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(2); + result = ht.get(get); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[4], STAMPS[5] }, + new byte[][] { VALUES[4], VALUES[5] }, 0, 1); + + scan = new Scan().withStartRow(ROW); + scan.addColumn(FAMILY, QUALIFIER); + scan.readVersions(2); + result = getSingleScanResult(ht, scan); + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[4], STAMPS[5] }, + new byte[][] { VALUES[4], VALUES[5] }, 0, 1); + + // Add some memstore and retest + + // Insert 4 more versions of same column and a dupe + put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[14]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[6], VALUES[6]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[7], VALUES[7]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[8], VALUES[8]); + ht.put(put); + + // Ensure maxVersions in query is respected + get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(7); + result = ht.get(get); + assertNResult(result, ROW, FAMILY, QUALIFIER, + new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, + new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], + VALUES[8] }, + 0, 6); + + scan = new Scan().withStartRow(ROW); + scan.addColumn(FAMILY, QUALIFIER); + scan.readVersions(7); + result = getSingleScanResult(ht, scan); + assertNResult(result, ROW, FAMILY, QUALIFIER, + new 
long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, + new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], + VALUES[8] }, + 0, 6); + + get = new Get(ROW); + get.readVersions(7); + result = ht.get(get); + assertNResult(result, ROW, FAMILY, QUALIFIER, + new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, + new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], + VALUES[8] }, + 0, 6); + + scan = new Scan().withStartRow(ROW); + scan.readVersions(7); + result = getSingleScanResult(ht, scan); + assertNResult(result, ROW, FAMILY, QUALIFIER, + new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, + new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], + VALUES[8] }, + 0, 6); + + // Verify we can get each one properly + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[14]); + getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[7], VALUES[7]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[14]); + scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[7], VALUES[7]); + + // Verify we don't accidentally get others + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); + getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[9]); + scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); + scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[9]); + + // Ensure maxVersions of table is respected + + TEST_UTIL.flush(); + + // Insert 4 more versions of same column and a dupe + put = new Put(ROW); + 
put.addColumn(FAMILY, QUALIFIER, STAMPS[9], VALUES[9]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[11], VALUES[11]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[13], VALUES[13]); + put.addColumn(FAMILY, QUALIFIER, STAMPS[15], VALUES[15]); + ht.put(put); + + get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(Integer.MAX_VALUE); + result = ht.get(get); + assertNResult(result, ROW, FAMILY, QUALIFIER, + new long[] { STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8], STAMPS[9], + STAMPS[11], STAMPS[13], STAMPS[15] }, + new byte[][] { VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], VALUES[8], VALUES[9], + VALUES[11], VALUES[13], VALUES[15] }, + 0, 9); + + scan = new Scan().withStartRow(ROW); + scan.addColumn(FAMILY, QUALIFIER); + scan.readVersions(Integer.MAX_VALUE); + result = getSingleScanResult(ht, scan); + assertNResult(result, ROW, FAMILY, QUALIFIER, + new long[] { STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8], STAMPS[9], + STAMPS[11], STAMPS[13], STAMPS[15] }, + new byte[][] { VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], VALUES[8], VALUES[9], + VALUES[11], VALUES[13], VALUES[15] }, + 0, 9); + + // Delete a version in the memstore and a version in a storefile + Delete delete = new Delete(ROW); + delete.addColumn(FAMILY, QUALIFIER, STAMPS[11]); + delete.addColumn(FAMILY, QUALIFIER, STAMPS[7]); + ht.delete(delete); + + // Test that it's gone + get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(Integer.MAX_VALUE); + result = ht.get(get); + assertNResult(result, ROW, FAMILY, QUALIFIER, + new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[8], + STAMPS[9], STAMPS[13], STAMPS[15] }, + new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[8], + VALUES[9], VALUES[13], VALUES[15] }, + 0, 9); + + scan = new Scan().withStartRow(ROW); + scan.addColumn(FAMILY, QUALIFIER); + scan.readVersions(Integer.MAX_VALUE); + 
result = getSingleScanResult(ht, scan); + assertNResult(result, ROW, FAMILY, QUALIFIER, + new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[8], + STAMPS[9], STAMPS[13], STAMPS[15] }, + new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[8], + VALUES[9], VALUES[13], VALUES[15] }, + 0, 9); + } + } + + @TestTemplate + public void testUpdates() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY, 10); + try (Connection conn = getConnection(); Table hTable = conn.getTable(tableName)) { + // Write a column with values at timestamp 1, 2 and 3 + byte[] row = Bytes.toBytes("row1"); + byte[] qualifier = Bytes.toBytes("myCol"); + Put put = new Put(row); + put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("AAA")); + hTable.put(put); + + put = new Put(row); + put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("BBB")); + hTable.put(put); + + put = new Put(row); + put.addColumn(FAMILY, qualifier, 3L, Bytes.toBytes("EEE")); + hTable.put(put); + + Get get = new Get(row); + get.addColumn(FAMILY, qualifier); + get.readAllVersions(); + + // Check that the column indeed has the right values at timestamps 1 and + // 2 + Result result = hTable.get(get); + NavigableMap navigableMap = result.getMap().get(FAMILY).get(qualifier); + assertEquals("AAA", Bytes.toString(navigableMap.get(1L))); + assertEquals("BBB", Bytes.toString(navigableMap.get(2L))); + + // Update the value at timestamp 1 + put = new Put(row); + put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("CCC")); + hTable.put(put); + + // Update the value at timestamp 2 + put = new Put(row); + put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("DDD")); + hTable.put(put); + + // Check that the values at timestamp 2 and 1 got updated + result = hTable.get(get); + navigableMap = result.getMap().get(FAMILY).get(qualifier); + assertEquals("CCC", Bytes.toString(navigableMap.get(1L))); + assertEquals("DDD", Bytes.toString(navigableMap.get(2L))); + } + } + + 
@TestTemplate + public void testUpdatesWithMajorCompaction() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY, 10); + try (Connection conn = getConnection(); Table hTable = conn.getTable(tableName); + Admin admin = conn.getAdmin()) { + // Write a column with values at timestamp 1, 2 and 3 + byte[] row = Bytes.toBytes("row2"); + byte[] qualifier = Bytes.toBytes("myCol"); + Put put = new Put(row); + put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("AAA")); + hTable.put(put); + + put = new Put(row); + put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("BBB")); + hTable.put(put); + + put = new Put(row); + put.addColumn(FAMILY, qualifier, 3L, Bytes.toBytes("EEE")); + hTable.put(put); + + Get get = new Get(row); + get.addColumn(FAMILY, qualifier); + get.readAllVersions(); + + // Check that the column indeed has the right values at timestamps 1 and + // 2 + Result result = hTable.get(get); + NavigableMap navigableMap = result.getMap().get(FAMILY).get(qualifier); + assertEquals("AAA", Bytes.toString(navigableMap.get(1L))); + assertEquals("BBB", Bytes.toString(navigableMap.get(2L))); + + // Trigger a major compaction + admin.flush(tableName); + admin.majorCompact(tableName); + Thread.sleep(6000); + + // Update the value at timestamp 1 + put = new Put(row); + put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("CCC")); + hTable.put(put); + + // Update the value at timestamp 2 + put = new Put(row); + put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("DDD")); + hTable.put(put); + + // Trigger a major compaction + admin.flush(tableName); + admin.majorCompact(tableName); + Thread.sleep(6000); + + // Check that the values at timestamp 2 and 1 got updated + result = hTable.get(get); + navigableMap = result.getMap().get(FAMILY).get(qualifier); + assertEquals("CCC", Bytes.toString(navigableMap.get(1L))); + assertEquals("DDD", Bytes.toString(navigableMap.get(2L))); + } + } + + @TestTemplate + public void testMajorCompactionBetweenTwoUpdates() throws Exception { + 
TEST_UTIL.createTable(tableName, FAMILY, 10); + try (Connection conn = getConnection(); Table hTable = conn.getTable(tableName); + Admin admin = conn.getAdmin()) { + // Write a column with values at timestamp 1, 2 and 3 + byte[] row = Bytes.toBytes("row3"); + byte[] qualifier = Bytes.toBytes("myCol"); + Put put = new Put(row); + put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("AAA")); + hTable.put(put); + + put = new Put(row); + put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("BBB")); + hTable.put(put); + + put = new Put(row); + put.addColumn(FAMILY, qualifier, 3L, Bytes.toBytes("EEE")); + hTable.put(put); + + Get get = new Get(row); + get.addColumn(FAMILY, qualifier); + get.readAllVersions(); + + // Check that the column indeed has the right values at timestamps 1 and + // 2 + Result result = hTable.get(get); + NavigableMap navigableMap = result.getMap().get(FAMILY).get(qualifier); + assertEquals("AAA", Bytes.toString(navigableMap.get(1L))); + assertEquals("BBB", Bytes.toString(navigableMap.get(2L))); + + // Trigger a major compaction + admin.flush(tableName); + admin.majorCompact(tableName); + Thread.sleep(6000); + + // Update the value at timestamp 1 + put = new Put(row); + put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("CCC")); + hTable.put(put); + + // Trigger a major compaction + admin.flush(tableName); + admin.majorCompact(tableName); + Thread.sleep(6000); + + // Update the value at timestamp 2 + put = new Put(row); + put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("DDD")); + hTable.put(put); + + // Trigger a major compaction + admin.flush(tableName); + admin.majorCompact(tableName); + Thread.sleep(6000); + + // Check that the values at timestamp 2 and 1 got updated + result = hTable.get(get); + navigableMap = result.getMap().get(FAMILY).get(qualifier); + + assertEquals("CCC", Bytes.toString(navigableMap.get(1L))); + assertEquals("DDD", Bytes.toString(navigableMap.get(2L))); + } + } + + @TestTemplate + public void testGetEmptyTable() throws 
IOException { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + Get get = new Get(ROW); + get.addFamily(FAMILY); + Result r = table.get(get); + assertTrue(r.isEmpty()); + } + } + + @TestTemplate + public void testGetNullQualifier() throws IOException { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + put = new Put(ROW); + put.addColumn(FAMILY, null, VALUE); + table.put(put); + LOG.info("Row put"); + + Get get = new Get(ROW); + get.addColumn(FAMILY, null); + Result r = table.get(get); + assertEquals(1, r.size()); + + get = new Get(ROW); + get.addFamily(FAMILY); + r = table.get(get); + assertEquals(2, r.size()); + } + } + + @TestTemplate + public void testGetNonExistentRow() throws IOException { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + LOG.info("Row put"); + + Get get = new Get(ROW); + get.addFamily(FAMILY); + Result r = table.get(get); + assertFalse(r.isEmpty()); + System.out.println("Row retrieved successfully"); + + byte[] missingrow = Bytes.toBytes("missingrow"); + get = new Get(missingrow); + get.addFamily(FAMILY); + r = table.get(get); + assertTrue(r.isEmpty()); + LOG.info("Row missing as it should be"); + } + } + + @TestTemplate + public void testPut() throws IOException { + final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); + final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam"); + final byte[] row1 = Bytes.toBytes("row1"); + final byte[] row2 = Bytes.toBytes("row2"); + final byte[] value = Bytes.toBytes("abcd"); + TEST_UTIL.createTable(tableName, new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY }); + try (Connection conn = 
getConnection(); Table table = conn.getTable(tableName)) { + Put put = new Put(row1); + put.addColumn(CONTENTS_FAMILY, null, value); + table.put(put); + + put = new Put(row2); + put.addColumn(CONTENTS_FAMILY, null, value); + + assertEquals(1, put.size()); + assertEquals(1, put.getFamilyCellMap().get(CONTENTS_FAMILY).size()); + + // KeyValue v1 expectation. Cast for now until we go all Cell all the time. TODO + KeyValue kv = (KeyValue) put.getFamilyCellMap().get(CONTENTS_FAMILY).get(0); + + assertTrue(Bytes.equals(CellUtil.cloneFamily(kv), CONTENTS_FAMILY)); + // will it return null or an empty byte array? + assertTrue(Bytes.equals(CellUtil.cloneQualifier(kv), new byte[0])); + + assertTrue(Bytes.equals(CellUtil.cloneValue(kv), value)); + + table.put(put); + + Scan scan = new Scan(); + scan.addColumn(CONTENTS_FAMILY, null); + try (ResultScanner scanner = table.getScanner(scan)) { + for (Result r : scanner) { + for (Cell key : r.rawCells()) { + System.out.println(Bytes.toString(r.getRow()) + ": " + key.toString()); + } + } + } + } + } + + @TestTemplate + public void testPutNoCF() throws IOException { + final byte[] BAD_FAM = Bytes.toBytes("BAD_CF"); + final byte[] VAL = Bytes.toBytes(100); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + assertThrows(NoSuchColumnFamilyException.class, + () -> table.put(new Put(ROW).addColumn(BAD_FAM, QUALIFIER, VAL)), + "Should throw NoSuchColumnFamilyException"); + } + } + + @TestTemplate + public void testRowsPut() throws IOException { + final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); + final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam"); + final int NB_BATCH_ROWS = 10; + final byte[] value = Bytes.toBytes("abcd"); + TEST_UTIL.createTable(tableName, new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY }); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + ArrayList rowsUpdate = new ArrayList<>(); + for (int i = 0; 
i < NB_BATCH_ROWS; i++) { + byte[] row = Bytes.toBytes("row" + i); + Put put = new Put(row); + put.setDurability(Durability.SKIP_WAL); + put.addColumn(CONTENTS_FAMILY, null, value); + rowsUpdate.add(put); + } + table.put(rowsUpdate); + Scan scan = new Scan(); + scan.addFamily(CONTENTS_FAMILY); + try (ResultScanner scanner = table.getScanner(scan)) { + int nbRows = Iterables.size(scanner); + assertEquals(NB_BATCH_ROWS, nbRows); + } + } + } + + @TestTemplate + public void testRowsPutBufferedManyManyFlushes() throws IOException { + final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); + final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam"); + final byte[] value = Bytes.toBytes("abcd"); + final int NB_BATCH_ROWS = 10; + TEST_UTIL.createTable(tableName, new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY }); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + ArrayList rowsUpdate = new ArrayList<>(); + for (int i = 0; i < NB_BATCH_ROWS * 10; i++) { + byte[] row = Bytes.toBytes("row" + i); + Put put = new Put(row); + put.setDurability(Durability.SKIP_WAL); + put.addColumn(CONTENTS_FAMILY, null, value); + rowsUpdate.add(put); + } + table.put(rowsUpdate); + + Scan scan = new Scan(); + scan.addFamily(CONTENTS_FAMILY); + try (ResultScanner scanner = table.getScanner(scan)) { + int nbRows = Iterables.size(scanner); + assertEquals(NB_BATCH_ROWS * 10, nbRows); + } + } + } + + /** + * test for HBASE-737 + */ + @TestTemplate + public void testHBase737() throws IOException { + final byte[] FAM1 = Bytes.toBytes("fam1"); + final byte[] FAM2 = Bytes.toBytes("fam2"); + TEST_UTIL.createTable(tableName, new byte[][] { FAM1, FAM2 }); + // Open table + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + // Insert some values + Put put = new Put(ROW); + put.addColumn(FAM1, Bytes.toBytes("letters"), Bytes.toBytes("abcdefg")); + table.put(put); + Threads.sleepWithoutInterrupt(1000); + + put = new Put(ROW); + put.addColumn(FAM1, 
Bytes.toBytes("numbers"), Bytes.toBytes("123456")); + table.put(put); + + Threads.sleepWithoutInterrupt(1000); + + put = new Put(ROW); + put.addColumn(FAM2, Bytes.toBytes("letters"), Bytes.toBytes("hijklmnop")); + table.put(put); + + long[] times = new long[3]; + + // First scan the memstore + + Scan scan = new Scan(); + scan.addFamily(FAM1); + scan.addFamily(FAM2); + try (ResultScanner s = table.getScanner(scan)) { + int index = 0; + Result r; + while ((r = s.next()) != null) { + for (Cell key : r.rawCells()) { + times[index++] = key.getTimestamp(); + } + } + } + for (int i = 0; i < times.length - 1; i++) { + for (int j = i + 1; j < times.length; j++) { + assertTrue(times[j] > times[i]); + } + } + + // Flush data to disk and try again + TEST_UTIL.flush(); + + // Reset times + Arrays.fill(times, 0); + + Threads.sleepWithoutInterrupt(1000); + + scan = new Scan(); + scan.addFamily(FAM1); + scan.addFamily(FAM2); + try (ResultScanner s = table.getScanner(scan)) { + int index = 0; + Result r = null; + while ((r = s.next()) != null) { + for (Cell key : r.rawCells()) { + times[index++] = key.getTimestamp(); + } + } + for (int i = 0; i < times.length - 1; i++) { + for (int j = i + 1; j < times.length; j++) { + assertTrue(times[j] > times[i]); + } + } + } + } + } + + @TestTemplate + public void testListTables() throws IOException { + final String testTableName = tableName.toString(); + final TableName tableName1 = TableName.valueOf(testTableName + "1"); + final TableName tableName2 = TableName.valueOf(testTableName + "2"); + final TableName tableName3 = TableName.valueOf(testTableName + "3"); + TableName[] tables = new TableName[] { tableName1, tableName2, tableName3 }; + for (TableName table : tables) { + TEST_UTIL.createTable(table, FAMILY); + } + try (Connection conn = getConnection(); Admin admin = conn.getAdmin()) { + List ts = admin.listTableDescriptors(); + HashSet result = new HashSet<>(ts); + int size = result.size(); + assertTrue(size >= tables.length); + for 
(TableName table : tables) { + boolean found = false; + for (TableDescriptor t : ts) { + if (t.getTableName().equals(table)) { + found = true; + break; + } + } + assertTrue(found, "Not found: " + table); + } + } + } + + @TestTemplate + public void testMiscHTableStuff() throws IOException { + final String testTableName = tableName.toString(); + final TableName tableAname = TableName.valueOf(testTableName + "A"); + final TableName tableBname = TableName.valueOf(testTableName + "B"); + final byte[] attrName = Bytes.toBytes("TESTATTR"); + final byte[] attrValue = Bytes.toBytes("somevalue"); + byte[] value = Bytes.toBytes("value"); + TEST_UTIL.createTable(tableAname, HConstants.CATALOG_FAMILY); + TEST_UTIL.createTable(tableBname, HConstants.CATALOG_FAMILY); + try (Connection conn = getConnection(); Table a = conn.getTable(tableAname); + Table b = conn.getTable(tableBname)) { + Put put = new Put(ROW); + put.addColumn(HConstants.CATALOG_FAMILY, null, value); + a.put(put); + + // open a new connection to A and a connection to b + try (Connection c = getConnection(); Table newA = c.getTable(tableAname)) { + // copy data from A to B + Scan scan = new Scan(); + scan.addFamily(HConstants.CATALOG_FAMILY); + try (ResultScanner s = newA.getScanner(scan)) { + for (Result r : s) { + put = new Put(r.getRow()); + put.setDurability(Durability.SKIP_WAL); + for (Cell kv : r.rawCells()) { + put.add(kv); + } + b.put(put); + } + } + } + + // Opening a new connection to A will cause the tables to be reloaded + try (Connection c = getConnection(); Table anotherA = c.getTable(tableAname)) { + Get get = new Get(ROW); + get.addFamily(HConstants.CATALOG_FAMILY); + anotherA.get(get); + } + + // We can still access A through newA because it has the table information + // cached. And if it needs to recalibrate, that will cause the information + // to be reloaded. 
+ + // Test user metadata + Admin admin = TEST_UTIL.getAdmin(); + // make a modifiable descriptor + TableDescriptor desc = a.getDescriptor(); + // offline the table + admin.disableTable(tableAname); + // add a user attribute to HTD + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(desc).setValue(attrName, attrValue); + // add a user attribute to HCD + for (ColumnFamilyDescriptor c : desc.getColumnFamilies()) { + builder.modifyColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(c).setValue(attrName, attrValue).build()); + } + // update metadata for all regions of this table + admin.modifyTable(builder.build()); + // enable the table + admin.enableTable(tableAname); + + // Test that attribute changes were applied + desc = a.getDescriptor(); + assertEquals(desc.getTableName(), tableAname, "wrong table descriptor returned"); + // check HTD attribute + value = desc.getValue(attrName); + assertNotNull(value, "missing HTD attribute value"); + assertFalse(Bytes.compareTo(value, attrValue) != 0, "HTD attribute value is incorrect"); + // check HCD attribute + for (ColumnFamilyDescriptor c : desc.getColumnFamilies()) { + value = c.getValue(attrName); + assertNotNull(value, "missing HCD attribute value"); + assertFalse(Bytes.compareTo(value, attrValue) != 0, "HCD attribute value is incorrect"); + } + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest5.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest5.java new file mode 100644 index 000000000000..59469b47d382 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTest5.java @@ -0,0 +1,2745 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.awaitility.Awaitility.await; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.CellUtil; +import 
org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.PrivateCellUtil; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.Scan.ReadType; +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.filter.InclusiveStopFilter; +import org.apache.hadoop.hbase.filter.KeyOnlyFilter; +import org.apache.hadoop.hbase.filter.QualifierFilter; +import org.apache.hadoop.hbase.filter.RegexStringComparator; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; +import org.apache.hadoop.hbase.filter.SubstringComparator; +import org.apache.hadoop.hbase.filter.ValueFilter; +import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.io.hfile.BlockCache; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.TestTemplate; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse; + +/** + * Run tests that use the HBase clients; {@link Table}. Sets up the HBase mini cluster once at start + * and runs through all client tests. Each creates a table named for the method and does its stuff + * against that. Parameterized to run with different registry implementations. + */ +public class FromClientSideTest5 extends FromClientSideTestBase { + + protected FromClientSideTest5(Class registryImpl, + int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } + + private static final Logger LOG = LoggerFactory.getLogger(TestFromClientSide5.class); + + @TestTemplate + public void testGetClosestRowBefore() throws IOException, InterruptedException { + + final byte[] firstRow = Bytes.toBytes("row111"); + final byte[] secondRow = Bytes.toBytes("row222"); + final byte[] thirdRow = Bytes.toBytes("row333"); + final byte[] forthRow = Bytes.toBytes("row444"); + final byte[] beforeFirstRow = Bytes.toBytes("row"); + final byte[] beforeSecondRow = Bytes.toBytes("row22"); + final byte[] beforeThirdRow = Bytes.toBytes("row33"); + final byte[] beforeForthRow = Bytes.toBytes("row44"); + + try ( + Table table = TEST_UTIL.createTable(tableName, + new byte[][] { HConstants.CATALOG_FAMILY, Bytes.toBytes("info2") }, 1, 1024); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { + + // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow + // in 
Store.rowAtOrBeforeFromStoreFile + String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + Put put1 = new Put(firstRow); + Put put2 = new Put(secondRow); + Put put3 = new Put(thirdRow); + Put put4 = new Put(forthRow); + byte[] one = new byte[] { 1 }; + byte[] two = new byte[] { 2 }; + byte[] three = new byte[] { 3 }; + byte[] four = new byte[] { 4 }; + + put1.addColumn(HConstants.CATALOG_FAMILY, null, one); + put2.addColumn(HConstants.CATALOG_FAMILY, null, two); + put3.addColumn(HConstants.CATALOG_FAMILY, null, three); + put4.addColumn(HConstants.CATALOG_FAMILY, null, four); + table.put(put1); + table.put(put2); + table.put(put3); + table.put(put4); + region.flush(true); + + Result result; + + // Test before first that null is returned + result = getReverseScanResult(table, beforeFirstRow); + assertNull(result); + + // Test at first that first is returned + result = getReverseScanResult(table, firstRow); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), firstRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); + + // Test in between first and second that first is returned + result = getReverseScanResult(table, beforeSecondRow); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), firstRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); + + // Test at second make sure second is returned + result = getReverseScanResult(table, secondRow); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), secondRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); + + // Test in second and third, make sure second is returned + result = getReverseScanResult(table, 
beforeThirdRow); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), secondRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); + + // Test at third make sure third is returned + result = getReverseScanResult(table, thirdRow); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), thirdRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); + + // Test in third and forth, make sure third is returned + result = getReverseScanResult(table, beforeForthRow); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), thirdRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); + + // Test at forth make sure forth is returned + result = getReverseScanResult(table, forthRow); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), forthRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); + + // Test after forth make sure forth is returned + result = getReverseScanResult(table, Bytes.add(forthRow, one)); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), forthRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); + } + } + + private Result getReverseScanResult(Table table, byte[] row) throws IOException { + Scan scan = new Scan().withStartRow(row); + scan.setReadType(ReadType.PREAD); + scan.setReversed(true); + scan.setCaching(1); + scan.addFamily(HConstants.CATALOG_FAMILY); + try (ResultScanner scanner = table.getScanner(scan)) { + return scanner.next(); + } + } + + @TestTemplate + public void testMultiRowMutation() throws Exception { + LOG.info("Starting testMultiRowMutation"); + final byte[] ROW1 = 
Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] ROW3 = Bytes.toBytes("testRow3"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + // Add initial data + t.batch(Arrays.asList(new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE), + new Put(ROW2).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(1L)), + new Put(ROW3).addColumn(FAMILY, QUALIFIER, VALUE)), new Object[3]); + + // Execute MultiRowMutation + Put put = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put); + + Delete delete = new Delete(ROW1); + MutationProto m2 = ProtobufUtil.toMutation(MutationType.DELETE, delete); + + Increment increment = new Increment(ROW2).addColumn(FAMILY, QUALIFIER, 1L); + MutationProto m3 = ProtobufUtil.toMutation(MutationType.INCREMENT, increment); + + Append append = new Append(ROW3).addColumn(FAMILY, QUALIFIER, VALUE); + MutationProto m4 = ProtobufUtil.toMutation(MutationType.APPEND, append); + + MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); + mrmBuilder.addMutationRequest(m1); + mrmBuilder.addMutationRequest(m2); + mrmBuilder.addMutationRequest(m3); + mrmBuilder.addMutationRequest(m4); + + CoprocessorRpcChannel channel = t.coprocessorService(ROW); + MultiRowMutationService.BlockingInterface service = + MultiRowMutationService.newBlockingStub(channel); + MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); + + // Assert + assertTrue(response.getProcessed()); + + Result r = t.get(new Get(ROW)); + assertEquals(Bytes.toString(VALUE), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); + + r = t.get(new Get(ROW1)); + assertTrue(r.isEmpty()); + + r = t.get(new Get(ROW2)); + assertEquals(2L, Bytes.toLong(r.getValue(FAMILY, QUALIFIER))); + + r = t.get(new Get(ROW3)); + assertEquals(Bytes.toString(VALUE) + Bytes.toString(VALUE), + Bytes.toString(r.getValue(FAMILY, 
QUALIFIER))); + } + } + + @TestTemplate + public void testMultiRowMutationWithSingleConditionWhenConditionMatches() throws Exception { + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = Bytes.toBytes("testValue2"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + // Add initial data + t.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2)); + + // Execute MultiRowMutation with conditions + Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); + Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); + MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); + Delete delete = new Delete(ROW2); + MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); + + MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); + mrmBuilder.addMutationRequest(m1); + mrmBuilder.addMutationRequest(m2); + mrmBuilder.addMutationRequest(m3); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2, null)); + + CoprocessorRpcChannel channel = t.coprocessorService(ROW); + MultiRowMutationService.BlockingInterface service = + MultiRowMutationService.newBlockingStub(channel); + MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); + + // Assert + assertTrue(response.getProcessed()); + + Result r = t.get(new Get(ROW)); + assertEquals(Bytes.toString(VALUE), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); + + r = t.get(new Get(ROW1)); + assertEquals(Bytes.toString(VALUE1), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); + + r = t.get(new Get(ROW2)); + assertTrue(r.isEmpty()); + } + } + + @TestTemplate + public void testMultiRowMutationWithSingleConditionWhenConditionNotMatch() throws Exception { + 
final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = Bytes.toBytes("testValue2"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + // Add initial data + t.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2)); + + // Execute MultiRowMutation with conditions + Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); + Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); + MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); + Delete delete = new Delete(ROW2); + MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); + + MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); + mrmBuilder.addMutationRequest(m1); + mrmBuilder.addMutationRequest(m2); + mrmBuilder.addMutationRequest(m3); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE1, null)); + + CoprocessorRpcChannel channel = t.coprocessorService(ROW); + MultiRowMutationService.BlockingInterface service = + MultiRowMutationService.newBlockingStub(channel); + MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); + + // Assert + assertFalse(response.getProcessed()); + + Result r = t.get(new Get(ROW)); + assertTrue(r.isEmpty()); + + r = t.get(new Get(ROW1)); + assertTrue(r.isEmpty()); + + r = t.get(new Get(ROW2)); + assertEquals(Bytes.toString(VALUE2), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); + } + } + + @TestTemplate + public void testMultiRowMutationWithMultipleConditionsWhenConditionsMatch() throws Exception { + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = 
Bytes.toBytes("testValue2"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + // Add initial data + t.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2)); + + // Execute MultiRowMutation with conditions + Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); + Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); + MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); + Delete delete = new Delete(ROW2); + MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); + + MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); + mrmBuilder.addMutationRequest(m1); + mrmBuilder.addMutationRequest(m2); + mrmBuilder.addMutationRequest(m3); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW, FAMILY, QUALIFIER, CompareOperator.EQUAL, null, null)); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2, null)); + + CoprocessorRpcChannel channel = t.coprocessorService(ROW); + MultiRowMutationService.BlockingInterface service = + MultiRowMutationService.newBlockingStub(channel); + MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); + + // Assert + assertTrue(response.getProcessed()); + + Result r = t.get(new Get(ROW)); + assertEquals(Bytes.toString(VALUE), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); + + r = t.get(new Get(ROW1)); + assertEquals(Bytes.toString(VALUE1), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); + + r = t.get(new Get(ROW2)); + assertTrue(r.isEmpty()); + } + } + + @TestTemplate + public void testMultiRowMutationWithMultipleConditionsWhenConditionsNotMatch() throws Exception { + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = 
Bytes.toBytes("testValue2"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + // Add initial data + t.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2)); + + // Execute MultiRowMutation with conditions + Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); + Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); + MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); + Delete delete = new Delete(ROW2); + MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); + + MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); + mrmBuilder.addMutationRequest(m1); + mrmBuilder.addMutationRequest(m2); + mrmBuilder.addMutationRequest(m3); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW1, FAMILY, QUALIFIER, CompareOperator.EQUAL, null, null)); + mrmBuilder.addCondition( + ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE1, null)); + + CoprocessorRpcChannel channel = t.coprocessorService(ROW); + MultiRowMutationService.BlockingInterface service = + MultiRowMutationService.newBlockingStub(channel); + MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); + + // Assert + assertFalse(response.getProcessed()); + + Result r = t.get(new Get(ROW)); + assertTrue(r.isEmpty()); + + r = t.get(new Get(ROW1)); + assertTrue(r.isEmpty()); + + r = t.get(new Get(ROW2)); + assertEquals(Bytes.toString(VALUE2), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); + } + } + + @TestTemplate + public void testMultiRowMutationWithFilterConditionWhenConditionMatches() throws Exception { + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = Bytes.toBytes("testRow2"); + final byte[] QUALIFIER2 = Bytes.toBytes("testQualifier2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = 
Bytes.toBytes("testValue2"); + final byte[] VALUE3 = Bytes.toBytes("testValue3"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + // Add initial data + t.put( + new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2).addColumn(FAMILY, QUALIFIER2, VALUE3)); + + // Execute MultiRowMutation with conditions + Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); + Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); + MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); + Delete delete = new Delete(ROW2); + MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); + + MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); + mrmBuilder.addMutationRequest(m1); + mrmBuilder.addMutationRequest(m2); + mrmBuilder.addMutationRequest(m3); + mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, + new FilterList( + new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2), + new SingleColumnValueFilter(FAMILY, QUALIFIER2, CompareOperator.EQUAL, VALUE3)), + null)); + + CoprocessorRpcChannel channel = t.coprocessorService(ROW); + MultiRowMutationService.BlockingInterface service = + MultiRowMutationService.newBlockingStub(channel); + MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); + + // Assert + assertTrue(response.getProcessed()); + + Result r = t.get(new Get(ROW)); + assertEquals(Bytes.toString(VALUE), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); + + r = t.get(new Get(ROW1)); + assertEquals(Bytes.toString(VALUE1), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); + + r = t.get(new Get(ROW2)); + assertTrue(r.isEmpty()); + } + } + + @TestTemplate + public void testMultiRowMutationWithFilterConditionWhenConditionNotMatch() throws Exception { + final byte[] ROW1 = Bytes.toBytes("testRow1"); + final byte[] ROW2 = 
Bytes.toBytes("testRow2"); + final byte[] QUALIFIER2 = Bytes.toBytes("testQualifier2"); + final byte[] VALUE1 = Bytes.toBytes("testValue1"); + final byte[] VALUE2 = Bytes.toBytes("testValue2"); + final byte[] VALUE3 = Bytes.toBytes("testValue3"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + // Add initial data + t.put( + new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2).addColumn(FAMILY, QUALIFIER2, VALUE3)); + + // Execute MultiRowMutation with conditions + Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); + MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); + Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); + MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); + Delete delete = new Delete(ROW2); + MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); + + MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); + mrmBuilder.addMutationRequest(m1); + mrmBuilder.addMutationRequest(m2); + mrmBuilder.addMutationRequest(m3); + mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, + new FilterList( + new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2), + new SingleColumnValueFilter(FAMILY, QUALIFIER2, CompareOperator.EQUAL, VALUE2)), + null)); + + CoprocessorRpcChannel channel = t.coprocessorService(ROW); + MultiRowMutationService.BlockingInterface service = + MultiRowMutationService.newBlockingStub(channel); + MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); + + // Assert + assertFalse(response.getProcessed()); + + Result r = t.get(new Get(ROW)); + assertTrue(r.isEmpty()); + + r = t.get(new Get(ROW1)); + assertTrue(r.isEmpty()); + + r = t.get(new Get(ROW2)); + assertEquals(Bytes.toString(VALUE2), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); + } + } + + @TestTemplate + public void testRowMutations() throws Exception { + LOG.info("Starting 
testRowMutations"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + byte[][] QUALIFIERS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), + Bytes.toBytes("c"), Bytes.toBytes("d") }; + + // Test for Put operations + RowMutations arm = new RowMutations(ROW); + Put p = new Put(ROW); + p.addColumn(FAMILY, QUALIFIERS[0], VALUE); + arm.add(p); + Result r = t.mutateRow(arm); + assertTrue(r.getExists()); + assertTrue(r.isEmpty()); + + Get g = new Get(ROW); + r = t.get(g); + assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0]))); + + // Test for Put and Delete operations + arm = new RowMutations(ROW); + p = new Put(ROW); + p.addColumn(FAMILY, QUALIFIERS[1], VALUE); + arm.add(p); + Delete d = new Delete(ROW); + d.addColumns(FAMILY, QUALIFIERS[0]); + arm.add(d); + // TODO: Trying mutateRow again. The batch was failing with a one try only. + r = t.mutateRow(arm); + assertTrue(r.getExists()); + assertTrue(r.isEmpty()); + + r = t.get(g); + assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1]))); + assertNull(r.getValue(FAMILY, QUALIFIERS[0])); + + // Test for Increment and Append operations + arm = new RowMutations(ROW); + arm.add(Arrays.asList(new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE), + new Delete(ROW).addColumns(FAMILY, QUALIFIERS[1]), + new Increment(ROW).addColumn(FAMILY, QUALIFIERS[2], 5L), + new Append(ROW).addColumn(FAMILY, QUALIFIERS[3], Bytes.toBytes("abc")))); + r = t.mutateRow(arm); + assertTrue(r.getExists()); + assertEquals(5L, Bytes.toLong(r.getValue(FAMILY, QUALIFIERS[2]))); + assertEquals("abc", Bytes.toString(r.getValue(FAMILY, QUALIFIERS[3]))); + + g = new Get(ROW); + r = t.get(g); + assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0]))); + assertNull(r.getValue(FAMILY, QUALIFIERS[1])); + assertEquals(5L, Bytes.toLong(r.getValue(FAMILY, QUALIFIERS[2]))); + assertEquals("abc", 
Bytes.toString(r.getValue(FAMILY, QUALIFIERS[3]))); + + // Test that we get a region level exception + RowMutations nceRm = new RowMutations(ROW); + p = new Put(ROW); + p.addColumn(new byte[] { 'b', 'o', 'g', 'u', 's' }, QUALIFIERS[0], VALUE); + nceRm.add(p); + Exception e = assertThrows(Exception.class, () -> t.mutateRow(nceRm), + "Expected NoSuchColumnFamilyException"); + if (!(e instanceof NoSuchColumnFamilyException)) { + assertThat(e, instanceOf(RetriesExhaustedWithDetailsException.class)); + List causes = ((RetriesExhaustedWithDetailsException) e).getCauses(); + assertThat(causes, hasItem(instanceOf(NoSuchColumnFamilyException.class))); + } + } + } + + @TestTemplate + public void testBatchAppendWithReturnResultFalse() throws Exception { + LOG.info("Starting testBatchAppendWithReturnResultFalse"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + Append append1 = new Append(Bytes.toBytes("row1")); + append1.setReturnResults(false); + append1.addColumn(FAMILY, Bytes.toBytes("f1"), Bytes.toBytes("value1")); + Append append2 = new Append(Bytes.toBytes("row1")); + append2.setReturnResults(false); + append2.addColumn(FAMILY, Bytes.toBytes("f1"), Bytes.toBytes("value2")); + List appends = new ArrayList<>(); + appends.add(append1); + appends.add(append2); + Object[] results = new Object[2]; + table.batch(appends, results); + assertEquals(2, results.length); + for (Object r : results) { + Result result = (Result) r; + assertTrue(result.isEmpty()); + } + } + } + + @TestTemplate + public void testAppend() throws Exception { + LOG.info("Starting testAppend"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + byte[] v1 = Bytes.toBytes("42"); + byte[] v2 = Bytes.toBytes("23"); + byte[][] QUALIFIERS = + new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("c") }; + Append a = new Append(ROW); + 
a.addColumn(FAMILY, QUALIFIERS[0], v1); + a.addColumn(FAMILY, QUALIFIERS[1], v2); + a.setReturnResults(false); + assertEmptyResult(t.append(a)); + + a = new Append(ROW); + a.addColumn(FAMILY, QUALIFIERS[0], v2); + a.addColumn(FAMILY, QUALIFIERS[1], v1); + a.addColumn(FAMILY, QUALIFIERS[2], v2); + Result r = t.append(a); + assertEquals(0, Bytes.compareTo(Bytes.add(v1, v2), r.getValue(FAMILY, QUALIFIERS[0]))); + assertEquals(0, Bytes.compareTo(Bytes.add(v2, v1), r.getValue(FAMILY, QUALIFIERS[1]))); + // QUALIFIERS[2] previously not exist, verify both value and timestamp are correct + assertEquals(0, Bytes.compareTo(v2, r.getValue(FAMILY, QUALIFIERS[2]))); + assertEquals(r.getColumnLatestCell(FAMILY, QUALIFIERS[0]).getTimestamp(), + r.getColumnLatestCell(FAMILY, QUALIFIERS[2]).getTimestamp()); + } + } + + private List doAppend(final boolean walUsed) throws IOException { + LOG.info("Starting testAppend, walUsed is " + walUsed); + TableName tableName = TableName.valueOf( + this.tableName.getNameAsString() + (walUsed ? 
"_testAppendWithWAL" : "testAppendWithoutWAL")); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + final byte[] row1 = Bytes.toBytes("c"); + final byte[] row2 = Bytes.toBytes("b"); + final byte[] row3 = Bytes.toBytes("a"); + final byte[] qual = Bytes.toBytes("qual"); + Put put_0 = new Put(row2); + put_0.addColumn(FAMILY, qual, Bytes.toBytes("put")); + Put put_1 = new Put(row3); + put_1.addColumn(FAMILY, qual, Bytes.toBytes("put")); + Append append_0 = new Append(row1); + append_0.addColumn(FAMILY, qual, Bytes.toBytes("i")); + Append append_1 = new Append(row1); + append_1.addColumn(FAMILY, qual, Bytes.toBytes("k")); + Append append_2 = new Append(row1); + append_2.addColumn(FAMILY, qual, Bytes.toBytes("e")); + if (!walUsed) { + append_2.setDurability(Durability.SKIP_WAL); + } + Append append_3 = new Append(row1); + append_3.addColumn(FAMILY, qual, Bytes.toBytes("a")); + Scan s = new Scan(); + s.setCaching(1); + t.append(append_0); + t.put(put_0); + t.put(put_1); + List results = new LinkedList<>(); + try (ResultScanner scanner = t.getScanner(s)) { + // get one row(should be row3) from the scanner to make sure that we have send a request to + // region server, which means we have already set the read point, so later we should not see + // the new appended values. 
+ Result r = scanner.next(); + assertNotNull(r); + results.add(r); + t.append(append_1); + t.append(append_2); + t.append(append_3); + for (;;) { + r = scanner.next(); + if (r == null) { + break; + } + results.add(r); + } + } + return results; + } finally { + TEST_UTIL.deleteTable(tableName); + } + } + + @TestTemplate + public void testAppendWithoutWAL() throws Exception { + List resultsWithWal = doAppend(true); + List resultsWithoutWal = doAppend(false); + assertEquals(resultsWithWal.size(), resultsWithoutWal.size()); + for (int i = 0; i < resultsWithWal.size(); ++i) { + Result resultWithWal = resultsWithWal.get(i); + Result resultWithoutWal = resultsWithoutWal.get(i); + assertEquals(resultWithWal.rawCells().length, resultWithoutWal.rawCells().length); + for (int j = 0; j < resultWithWal.rawCells().length; ++j) { + Cell cellWithWal = resultWithWal.rawCells()[j]; + Cell cellWithoutWal = resultWithoutWal.rawCells()[j]; + assertArrayEquals(CellUtil.cloneRow(cellWithWal), CellUtil.cloneRow(cellWithoutWal)); + assertArrayEquals(CellUtil.cloneFamily(cellWithWal), CellUtil.cloneFamily(cellWithoutWal)); + assertArrayEquals(CellUtil.cloneQualifier(cellWithWal), + CellUtil.cloneQualifier(cellWithoutWal)); + assertArrayEquals(CellUtil.cloneValue(cellWithWal), CellUtil.cloneValue(cellWithoutWal)); + } + } + } + + @TestTemplate + public void testClientPoolRoundRobin() throws IOException { + int poolSize = 3; + int numVersions = poolSize * 2; + Configuration conf = TEST_UTIL.getConfiguration(); + conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "round-robin"); + conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize); + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, Integer.MAX_VALUE); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + final long ts = EnvironmentEdgeManager.currentTime(); + Get get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readAllVersions(); + + for (int versions = 1; versions <= numVersions; 
versions++) { + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, ts + versions, VALUE); + table.put(put); + + Result result = table.get(get); + NavigableMap navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); + + assertEquals(versions, navigableMap.size(), "The number of versions of '" + + Bytes.toString(FAMILY) + ":" + Bytes.toString(QUALIFIER) + " did not match"); + for (Map.Entry entry : navigableMap.entrySet()) { + assertTrue(Bytes.equals(VALUE, entry.getValue()), + "The value at time " + entry.getKey() + " did not match what was put"); + } + } + } + } + + @Disabled("Flakey: HBASE-8989") + @TestTemplate + public void testClientPoolThreadLocal() throws IOException { + int poolSize = Integer.MAX_VALUE; + int numVersions = 3; + Configuration conf = TEST_UTIL.getConfiguration(); + conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "thread-local"); + conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize); + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 3); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + final long ts = EnvironmentEdgeManager.currentTime(); + final Get get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readAllVersions(); + + for (int versions = 1; versions <= numVersions; versions++) { + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, ts + versions, VALUE); + table.put(put); + + Result result = table.get(get); + NavigableMap navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); + + assertEquals(versions, navigableMap.size(), "The number of versions of '" + + Bytes.toString(FAMILY) + ":" + Bytes.toString(QUALIFIER) + " did not match"); + for (Map.Entry entry : navigableMap.entrySet()) { + assertTrue(Bytes.equals(VALUE, entry.getValue()), + "The value at time " + entry.getKey() + " did not match what was put"); + } + } + + final Object waitLock = new Object(); + ExecutorService executorService = Executors.newFixedThreadPool(numVersions); + final 
AtomicReference error = new AtomicReference<>(null); + for (int versions = numVersions; versions < numVersions * 2; versions++) { + final int versionsCopy = versions; + executorService.submit((Callable) () -> { + try { + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, ts + versionsCopy, VALUE); + table.put(put); + + Result result = table.get(get); + NavigableMap navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); + + assertEquals(versionsCopy, navigableMap.size(), + "The number of versions of '" + Bytes.toString(FAMILY) + ":" + + Bytes.toString(QUALIFIER) + " did not match " + versionsCopy); + for (Map.Entry entry : navigableMap.entrySet()) { + assertTrue(Bytes.equals(VALUE, entry.getValue()), + "The value at time " + entry.getKey() + " did not match what was put"); + } + synchronized (waitLock) { + waitLock.wait(); + } + } catch (Exception ignored) { + } catch (AssertionError e) { + // the error happens in a thread, it won't fail the test, + // need to pass it to the caller for proper handling. + error.set(e); + LOG.error(e.toString(), e); + } + + return null; + }); + } + synchronized (waitLock) { + waitLock.notifyAll(); + } + executorService.shutdownNow(); + assertNull(error.get()); + } + } + + @TestTemplate + public void testCheckAndPut() throws IOException { + final byte[] anotherrow = Bytes.toBytes("anotherrow"); + final byte[] value2 = Bytes.toBytes("abcd"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + Put put1 = new Put(ROW); + put1.addColumn(FAMILY, QUALIFIER, VALUE); + + // row doesn't exist, so using non-null value should be considered "not match". + boolean ok = + table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenPut(put1); + assertFalse(ok); + + // row doesn't exist, so using "ifNotExists" should be considered "match". 
+ ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put1); + assertTrue(ok); + + // row now exists, so using "ifNotExists" should be considered "not match". + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put1); + assertFalse(ok); + + Put put2 = new Put(ROW); + put2.addColumn(FAMILY, QUALIFIER, value2); + + // row now exists, use the matching value to check + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenPut(put2); + assertTrue(ok); + + Put put3 = new Put(anotherrow); + put3.addColumn(FAMILY, QUALIFIER, VALUE); + + // try to do CheckAndPut on different rows + try { + table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(value2).thenPut(put3); + fail("trying to check and modify different rows should have failed."); + } catch (Exception ignored) { + } + } + } + + @TestTemplate + public void testCheckAndMutateWithTimeRange() throws IOException { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + final long ts = EnvironmentEdgeManager.currentTime() / 2; + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, ts, VALUE); + + boolean ok = + table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put); + assertTrue(ok); + + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .timeRange(TimeRange.at(ts + 10000)).ifEquals(VALUE).thenPut(put); + assertFalse(ok); + + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .timeRange(TimeRange.from(ts + 10000)).ifEquals(VALUE).thenPut(put); + assertFalse(ok); + + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .timeRange(TimeRange.between(ts + 10000, ts + 20000)).ifEquals(VALUE).thenPut(put); + assertFalse(ok); + + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.until(ts)) + .ifEquals(VALUE).thenPut(put); + assertFalse(ok); + + ok = 
table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) + .ifEquals(VALUE).thenPut(put); + assertTrue(ok); + + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.from(ts)) + .ifEquals(VALUE).thenPut(put); + assertTrue(ok); + + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .timeRange(TimeRange.between(ts, ts + 20000)).ifEquals(VALUE).thenPut(put); + assertTrue(ok); + + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .timeRange(TimeRange.until(ts + 10000)).ifEquals(VALUE).thenPut(put); + assertTrue(ok); + + RowMutations rm = new RowMutations(ROW).add((Mutation) put); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .timeRange(TimeRange.at(ts + 10000)).ifEquals(VALUE).thenMutate(rm); + assertFalse(ok); + + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) + .ifEquals(VALUE).thenMutate(rm); + assertTrue(ok); + + Delete delete = new Delete(ROW).addColumn(FAMILY, QUALIFIER); + + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .timeRange(TimeRange.at(ts + 10000)).ifEquals(VALUE).thenDelete(delete); + assertFalse(ok); + + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) + .ifEquals(VALUE).thenDelete(delete); + assertTrue(ok); + } + } + + @TestTemplate + public void testCheckAndPutWithCompareOp() throws IOException { + final byte[] value1 = Bytes.toBytes("aaaa"); + final byte[] value2 = Bytes.toBytes("bbbb"); + final byte[] value3 = Bytes.toBytes("cccc"); + final byte[] value4 = Bytes.toBytes("dddd"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + Put put2 = new Put(ROW); + put2.addColumn(FAMILY, QUALIFIER, value2); + + Put put3 = new Put(ROW); + put3.addColumn(FAMILY, QUALIFIER, value3); + + // row doesn't exist, so using "ifNotExists" should be considered "match". 
+ boolean ok = + table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put2); + assertTrue(ok); + + // cell = "bbbb", using "aaaa" to compare only LESS/LESS_OR_EQUAL/NOT_EQUAL + // turns out "match" + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER, value1).thenPut(put2); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.EQUAL, value1).thenPut(put2); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value1).thenPut(put2); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS, value1).thenPut(put2); + assertTrue(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS_OR_EQUAL, value1).thenPut(put2); + assertTrue(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.NOT_EQUAL, value1).thenPut(put3); + assertTrue(ok); + + // cell = "cccc", using "dddd" to compare only LARGER/LARGER_OR_EQUAL/NOT_EQUAL + // turns out "match" + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS, value4).thenPut(put3); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS_OR_EQUAL, value4).thenPut(put3); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.EQUAL, value4).thenPut(put3); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER, value4).thenPut(put3); + assertTrue(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value4).thenPut(put3); + assertTrue(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + 
.ifMatches(CompareOperator.NOT_EQUAL, value4).thenPut(put2); + assertTrue(ok); + + // cell = "bbbb", using "bbbb" to compare only GREATER_OR_EQUAL/LESS_OR_EQUAL/EQUAL + // turns out "match" + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER, value2).thenPut(put2); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.NOT_EQUAL, value2).thenPut(put2); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS, value2).thenPut(put2); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value2).thenPut(put2); + assertTrue(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS_OR_EQUAL, value2).thenPut(put2); + assertTrue(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.EQUAL, value2).thenPut(put3); + assertTrue(ok); + } + } + + @TestTemplate + public void testCheckAndDelete() throws IOException { + final byte[] value1 = Bytes.toBytes("aaaa"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, value1); + table.put(put); + + Delete delete = new Delete(ROW); + delete.addColumns(FAMILY, QUALIFIER); + + boolean ok = + table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(value1).thenDelete(delete); + assertTrue(ok); + } + } + + @TestTemplate + public void testCheckAndDeleteWithCompareOp() throws IOException { + final byte[] value1 = Bytes.toBytes("aaaa"); + final byte[] value2 = Bytes.toBytes("bbbb"); + final byte[] value3 = Bytes.toBytes("cccc"); + final byte[] value4 = Bytes.toBytes("dddd"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = 
conn.getTable(tableName)) { + Put put2 = new Put(ROW); + put2.addColumn(FAMILY, QUALIFIER, value2); + table.put(put2); + + Put put3 = new Put(ROW); + put3.addColumn(FAMILY, QUALIFIER, value3); + + Delete delete = new Delete(ROW); + delete.addColumns(FAMILY, QUALIFIER); + + // cell = "bbbb", using "aaaa" to compare only LESS/LESS_OR_EQUAL/NOT_EQUAL + // turns out "match" + boolean ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER, value1).thenDelete(delete); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.EQUAL, value1).thenDelete(delete); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value1).thenDelete(delete); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS, value1).thenDelete(delete); + assertTrue(ok); + table.put(put2); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS_OR_EQUAL, value1).thenDelete(delete); + assertTrue(ok); + table.put(put2); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.NOT_EQUAL, value1).thenDelete(delete); + assertTrue(ok); + + // cell = "cccc", using "dddd" to compare only GREATER/GREATER_OR_EQUAL/NOT_EQUAL + // turns out "match" + table.put(put3); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS, value4).thenDelete(delete); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS_OR_EQUAL, value4).thenDelete(delete); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.EQUAL, value4).thenDelete(delete); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER, 
value4).thenDelete(delete); + assertTrue(ok); + table.put(put3); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value4).thenDelete(delete); + assertTrue(ok); + table.put(put3); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.NOT_EQUAL, value4).thenDelete(delete); + assertTrue(ok); + + // cell = "bbbb", using "bbbb" to compare only GREATER_OR_EQUAL/LESS_OR_EQUAL/EQUAL + // turns out "match" + table.put(put2); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER, value2).thenDelete(delete); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.NOT_EQUAL, value2).thenDelete(delete); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS, value2).thenDelete(delete); + assertFalse(ok); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.GREATER_OR_EQUAL, value2).thenDelete(delete); + assertTrue(ok); + table.put(put2); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.LESS_OR_EQUAL, value2).thenDelete(delete); + assertTrue(ok); + table.put(put2); + ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) + .ifMatches(CompareOperator.EQUAL, value2).thenDelete(delete); + assertTrue(ok); + } + } + + /** + * Test ScanMetrics + */ + @TestTemplate + @SuppressWarnings({ "unused", "checkstyle:EmptyBlock" }) + public void testScanMetrics() throws Exception { + // Set up test table: + // Create table: + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + int numOfRegions; + try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) { + numOfRegions = r.getStartKeys().length; + } + // Create 3 rows in the table, with rowkeys starting with "zzz*" so 
that + // scan are forced to hit all the regions. + Put put1 = new Put(Bytes.toBytes("zzz1")); + put1.addColumn(FAMILY, QUALIFIER, VALUE); + Put put2 = new Put(Bytes.toBytes("zzz2")); + put2.addColumn(FAMILY, QUALIFIER, VALUE); + Put put3 = new Put(Bytes.toBytes("zzz3")); + put3.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(Arrays.asList(put1, put2, put3)); + + Scan scan1 = new Scan(); + int numRecords = 0; + try (ResultScanner scanner = ht.getScanner(scan1)) { + for (Result result : scanner) { + numRecords++; + } + + LOG.info("test data has {} records.", numRecords); + + // by default, scan metrics collection is turned off + assertNull(scanner.getScanMetrics()); + } + + // turn on scan metrics + Scan scan2 = new Scan(); + scan2.setScanMetricsEnabled(true); + scan2.setCaching(numRecords + 1); + try (ResultScanner scanner = ht.getScanner(scan2)) { + for (Result result : scanner.next(numRecords - 1)) { + } + assertNotNull(scanner.getScanMetrics()); + } + + // set caching to 1, because metrics are collected in each roundtrip only + scan2 = new Scan(); + scan2.setScanMetricsEnabled(true); + scan2.setCaching(1); + try (ResultScanner scanner = ht.getScanner(scan2)) { + // per HBASE-5717, this should still collect even if you don't run all the way to + // the end of the scanner. So this is asking for 2 of the 3 rows we inserted. 
+ for (Result result : scanner.next(numRecords - 1)) { + } + ScanMetrics scanMetrics = scanner.getScanMetrics(); + assertEquals(numOfRegions, scanMetrics.countOfRegions.get(), + "Did not access all the regions in the table"); + } + + // check byte counters + scan2 = new Scan(); + scan2.setScanMetricsEnabled(true); + scan2.setCaching(1); + try (ResultScanner scanner = ht.getScanner(scan2)) { + int numBytes = 0; + for (Result result : scanner) { + for (Cell cell : result.listCells()) { + numBytes += PrivateCellUtil.estimatedSerializedSizeOf(cell); + } + } + ScanMetrics scanMetrics = scanner.getScanMetrics(); + assertEquals(numBytes, scanMetrics.countOfBytesInResults.get(), + "Did not count the result bytes"); + } + + // check byte counters on a small scan + scan2 = new Scan(); + scan2.setScanMetricsEnabled(true); + scan2.setCaching(1); + scan2.setReadType(ReadType.PREAD); + try (ResultScanner scanner = ht.getScanner(scan2)) { + int numBytes = 0; + for (Result result : scanner) { + for (Cell cell : result.listCells()) { + numBytes += PrivateCellUtil.estimatedSerializedSizeOf(cell); + } + } + ScanMetrics scanMetrics = scanner.getScanMetrics(); + assertEquals(numBytes, scanMetrics.countOfBytesInResults.get(), + "Did not count the result bytes"); + } + + // now, test that the metrics are still collected even if you don't call close, but do + // run past the end of all the records + /** + * There seems to be a timing issue here. Comment out for now. Fix when time. 
Scan + * scanWithoutClose = new Scan(); scanWithoutClose.setCaching(1); + * scanWithoutClose.setScanMetricsEnabled(true); ResultScanner scannerWithoutClose = + * ht.getScanner(scanWithoutClose); for (Result result : scannerWithoutClose.next(numRecords + + * 1)) { } ScanMetrics scanMetricsWithoutClose = getScanMetrics(scanWithoutClose); + * assertEquals("Did not access all the regions in the table", numOfRegions, + * scanMetricsWithoutClose.countOfRegions.get()); + */ + + // finally, + // test that the metrics are collected correctly if you both run past all the records, + // AND close the scanner + Scan scanWithClose = new Scan(); + // make sure we can set caching up to the number of scanned values + scanWithClose.setCaching(numRecords); + scanWithClose.setScanMetricsEnabled(true); + try (ResultScanner scannerWithClose = ht.getScanner(scanWithClose)) { + for (Result result : scannerWithClose.next(numRecords + 1)) { + } + scannerWithClose.close(); + ScanMetrics scanMetricsWithClose = scannerWithClose.getScanMetrics(); + assertEquals(numOfRegions, scanMetricsWithClose.countOfRegions.get(), + "Did not access all the regions in the table"); + } + } finally { + TEST_UTIL.deleteTable(tableName); + } + } + + /** + * Tests that cache on write works all the way up from the client-side. Performs inserts, flushes, + * and compactions, verifying changes in the block cache along the way. 
+ */ + @TestTemplate + public void testCacheOnWriteEvictOnClose() throws Exception { + byte[] data = Bytes.toBytes("data"); + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName); + RegionLocator locator = conn.getRegionLocator(tableName)) { + // get the block cache and region + String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); + + HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HStore store = region.getStores().iterator().next(); + CacheConfig cacheConf = store.getCacheConfig(); + cacheConf.setCacheDataOnWrite(true); + cacheConf.setEvictOnClose(true); + BlockCache cache = cacheConf.getBlockCache().get(); + + // establish baseline stats + long startBlockCount = cache.getBlockCount(); + long startBlockHits = cache.getStats().getHitCount(); + long startBlockMiss = cache.getStats().getMissCount(); + + // wait till baseline is stable, (minimal 500 ms) + for (int i = 0; i < 5; i++) { + Thread.sleep(100); + if ( + startBlockCount != cache.getBlockCount() + || startBlockHits != cache.getStats().getHitCount() + || startBlockMiss != cache.getStats().getMissCount() + ) { + startBlockCount = cache.getBlockCount(); + startBlockHits = cache.getStats().getHitCount(); + startBlockMiss = cache.getStats().getMissCount(); + i = -1; + } + } + + // insert data + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, data); + table.put(put); + assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data)); + + // data was in memstore so don't expect any changes + assertEquals(startBlockCount, cache.getBlockCount()); + assertEquals(startBlockHits, cache.getStats().getHitCount()); + assertEquals(startBlockMiss, cache.getStats().getMissCount()); + + // flush the data + LOG.debug("Flushing cache"); + region.flush(true); + + // expect two more blocks in cache - DATA and ROOT_INDEX + // , no change in hits/misses + long expectedBlockCount 
= startBlockCount + 2; + long expectedBlockHits = startBlockHits; + long expectedBlockMiss = startBlockMiss; + assertEquals(expectedBlockCount, cache.getBlockCount()); + assertEquals(expectedBlockHits, cache.getStats().getHitCount()); + assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); + // read the data and expect same blocks, one new hit, no misses + assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data)); + assertEquals(expectedBlockCount, cache.getBlockCount()); + assertEquals(++expectedBlockHits, cache.getStats().getHitCount()); + assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); + // insert a second column, read the row, no new blocks, one new hit + byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER); + byte[] data2 = Bytes.add(data, data); + put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER2, data2); + table.put(put); + Result r = table.get(new Get(ROW)); + assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data)); + assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2)); + assertEquals(expectedBlockCount, cache.getBlockCount()); + assertEquals(++expectedBlockHits, cache.getStats().getHitCount()); + assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); + // flush, one new block + LOG.info("Flushing cache"); + region.flush(true); + + // + 1 for Index Block, +1 for data block + expectedBlockCount += 2; + assertEquals(expectedBlockCount, cache.getBlockCount()); + assertEquals(expectedBlockHits, cache.getStats().getHitCount()); + assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); + // compact, net minus two blocks, two hits, no misses + LOG.info("Compacting"); + assertEquals(2, store.getStorefilesCount()); + region.compact(true); + store.closeAndArchiveCompactedFiles(); + waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max + assertEquals(1, store.getStorefilesCount()); + // evicted two data blocks and two index blocks and compaction does not cache new blocks + 
expectedBlockCount = startBlockCount; + assertEquals(expectedBlockCount, cache.getBlockCount()); + expectedBlockHits += 2; + assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); + assertEquals(expectedBlockHits, cache.getStats().getHitCount()); + // read the row, this should be a cache miss because we don't cache data + // blocks on compaction + r = table.get(new Get(ROW)); + assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data)); + assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2)); + expectedBlockCount += 1; // cached one data block + assertEquals(expectedBlockCount, cache.getBlockCount()); + assertEquals(expectedBlockHits, cache.getStats().getHitCount()); + assertEquals(++expectedBlockMiss, cache.getStats().getMissCount()); + } + } + + private void waitForStoreFileCount(HStore store, int count, int timeout) + throws InterruptedException { + await().atMost(Duration.ofMillis(timeout)) + .untilAsserted(() -> assertEquals(count, store.getStorefilesCount())); + } + + /** + * Tests the non cached version of getRegionLocator by moving a region. + */ + @TestTemplate + public void testNonCachedGetRegionLocation() throws Exception { + // Test Initialization. + byte[] family1 = Bytes.toBytes("f1"); + byte[] family2 = Bytes.toBytes("f2"); + TEST_UTIL.createTable(tableName, new byte[][] { family1, family2 }, 10); + try (Connection conn = getConnection(); Table ignored = conn.getTable(tableName); + Admin admin = conn.getAdmin(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { + List allRegionLocations = locator.getAllRegionLocations(); + assertEquals(1, allRegionLocations.size()); + RegionInfo regionInfo = allRegionLocations.get(0).getRegion(); + ServerName addrBefore = allRegionLocations.get(0).getServerName(); + // Verify region location before move. 
+ HRegionLocation addrCache = locator.getRegionLocation(regionInfo.getStartKey(), false); + HRegionLocation addrNoCache = locator.getRegionLocation(regionInfo.getStartKey(), true); + + assertEquals(addrBefore.getPort(), addrCache.getPort()); + assertEquals(addrBefore.getPort(), addrNoCache.getPort()); + + // Make sure more than one server. + if (TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size() <= 1) { + TEST_UTIL.getMiniHBaseCluster().startRegionServer(); + Waiter.waitFor(TEST_UTIL.getConfiguration(), 30000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size() > 1; + } + }); + } + + ServerName addrAfter = null; + // Now move the region to a different server. + for (int i = 0; i + < TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size(); i++) { + HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(i); + ServerName addr = regionServer.getServerName(); + if (addr.getPort() != addrBefore.getPort()) { + admin.move(regionInfo.getEncodedNameAsBytes(), addr); + // Wait for the region to move. + Thread.sleep(5000); + addrAfter = addr; + break; + } + } + + // Verify the region was moved. + addrCache = locator.getRegionLocation(regionInfo.getStartKey(), false); + addrNoCache = locator.getRegionLocation(regionInfo.getStartKey(), true); + assertNotNull(addrAfter); + assertTrue(addrAfter.getPort() != addrCache.getPort()); + assertEquals(addrAfter.getPort(), addrNoCache.getPort()); + } + } + + /** + * Tests getRegionsInRange by creating some regions over which a range of keys spans; then + * changing the key range. + */ + @TestTemplate + public void testGetRegionsInRange() throws Exception { + // Test Initialization. 
+ byte[] startKey = Bytes.toBytes("ddc"); + byte[] endKey = Bytes.toBytes("mmm"); + TEST_UTIL.createMultiRegionTable(tableName, new byte[][] { FAMILY }, 10); + + int numOfRegions; + try (Connection conn = getConnection(); RegionLocator r = conn.getRegionLocator(tableName)) { + numOfRegions = r.getStartKeys().length; + } + assertEquals(26, numOfRegions); + + // Get the regions in this range + List regionsList = getRegionsInRange(tableName, startKey, endKey); + assertEquals(10, regionsList.size()); + + // Change the start key + startKey = Bytes.toBytes("fff"); + regionsList = getRegionsInRange(tableName, startKey, endKey); + assertEquals(7, regionsList.size()); + + // Change the end key + endKey = Bytes.toBytes("nnn"); + regionsList = getRegionsInRange(tableName, startKey, endKey); + assertEquals(8, regionsList.size()); + + // Empty start key + regionsList = getRegionsInRange(tableName, HConstants.EMPTY_START_ROW, endKey); + assertEquals(13, regionsList.size()); + + // Empty end key + regionsList = getRegionsInRange(tableName, startKey, HConstants.EMPTY_END_ROW); + assertEquals(21, regionsList.size()); + + // Both start and end keys empty + regionsList = + getRegionsInRange(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); + assertEquals(26, regionsList.size()); + + // Change the end key to somewhere in the last block + endKey = Bytes.toBytes("zzz1"); + regionsList = getRegionsInRange(tableName, startKey, endKey); + assertEquals(21, regionsList.size()); + + // Change the start key to somewhere in the first block + startKey = Bytes.toBytes("aac"); + regionsList = getRegionsInRange(tableName, startKey, endKey); + assertEquals(26, regionsList.size()); + + // Make start and end key the same + startKey = Bytes.toBytes("ccc"); + endKey = Bytes.toBytes("ccc"); + regionsList = getRegionsInRange(tableName, startKey, endKey); + assertEquals(1, regionsList.size()); + } + + private List getRegionsInRange(TableName tableName, byte[] startKey, + byte[] endKey) 
throws IOException { + List regionsInRange = new ArrayList<>(); + byte[] currentKey = startKey; + final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW); + try (Connection conn = getConnection(); RegionLocator r = conn.getRegionLocator(tableName)) { + do { + HRegionLocation regionLocation = r.getRegionLocation(currentKey); + regionsInRange.add(regionLocation); + currentKey = regionLocation.getRegion().getEndKey(); + } while ( + !Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) + && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0) + ); + return regionsInRange; + } + } + + @TestTemplate + public void testJira6912() throws Exception { + TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 10); + try (Connection conn = getConnection(); Table foo = conn.getTable(tableName)) { + List puts = new ArrayList<>(); + for (int i = 0; i != 100; i++) { + Put put = new Put(Bytes.toBytes(i)); + put.addColumn(FAMILY, FAMILY, Bytes.toBytes(i)); + puts.add(put); + } + foo.put(puts); + // If i comment this out it works + TEST_UTIL.flush(); + + Scan scan = new Scan(); + scan.withStartRow(Bytes.toBytes(1)); + scan.withStopRow(Bytes.toBytes(3)); + scan.addColumn(FAMILY, FAMILY); + scan.setFilter( + new RowFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(1)))); + + try (ResultScanner scanner = foo.getScanner(scan)) { + Result[] bar = scanner.next(100); + assertEquals(1, bar.length); + } + } + } + + @TestTemplate + public void testScanNullQualifier() throws IOException { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + Put put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + put = new Put(ROW); + put.addColumn(FAMILY, null, VALUE); + table.put(put); + LOG.info("Row put"); + + Scan scan = new Scan(); + scan.addColumn(FAMILY, null); + + ResultScanner scanner = table.getScanner(scan); + Result[] bar = 
scanner.next(100); + assertEquals(1, bar.length); + assertEquals(1, bar[0].size()); + + scan = new Scan(); + scan.addFamily(FAMILY); + + scanner = table.getScanner(scan); + bar = scanner.next(100); + assertEquals(1, bar.length); + assertEquals(2, bar[0].size()); + } + } + + @TestTemplate + public void testRawScanRespectsVersions() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + byte[] row = Bytes.toBytes("row"); + + // put the same row 4 times, with different values + Put p = new Put(row); + p.addColumn(FAMILY, QUALIFIER, 10, VALUE); + table.put(p); + p = new Put(row); + p.addColumn(FAMILY, QUALIFIER, 11, ArrayUtils.add(VALUE, (byte) 2)); + table.put(p); + + p = new Put(row); + p.addColumn(FAMILY, QUALIFIER, 12, ArrayUtils.add(VALUE, (byte) 3)); + table.put(p); + + p = new Put(row); + p.addColumn(FAMILY, QUALIFIER, 13, ArrayUtils.add(VALUE, (byte) 4)); + table.put(p); + + int versions = 4; + Scan s = new Scan().withStartRow(row); + // get all the possible versions + s.readAllVersions(); + s.setRaw(true); + + try (ResultScanner scanner = table.getScanner(s)) { + int count = 0; + for (Result r : scanner) { + assertEquals(versions, r.listCells().size(), + "Found an unexpected number of results for the row!"); + count++; + } + assertEquals(1, count, + "Found more than a single row when raw scanning the table with a single row!"); + } + + // then if we decrease the number of versions, but keep the scan raw, we should see exactly + // that number of versions + versions = 2; + s.readVersions(versions); + try (ResultScanner scanner = table.getScanner(s)) { + int count = 0; + for (Result r : scanner) { + assertEquals(versions, r.listCells().size(), + "Found an unexpected number of results for the row!"); + count++; + } + assertEquals(1, count, + "Found more than a single row when raw scanning the table with a single row!"); + } + + // finally, if we turn off raw scanning, 
but max out the number of versions, we should go back + // to seeing just three + versions = 3; + s.readVersions(versions); + try (ResultScanner scanner = table.getScanner(s)) { + int count = 0; + for (Result r : scanner) { + assertEquals(versions, r.listCells().size(), + "Found an unexpected number of results for the row!"); + count++; + } + assertEquals(1, count, + "Found more than a single row when raw scanning the table with a single row!"); + } + + } + TEST_UTIL.deleteTable(tableName); + } + + @TestTemplate + public void testEmptyFilterList() throws Exception { + // Test Initialization. + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + // Insert one row each region + Put put = new Put(Bytes.toBytes("row")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + List scanResults = new LinkedList<>(); + Scan scan = new Scan(); + scan.setFilter(new FilterList()); + try (ResultScanner scanner = table.getScanner(scan)) { + for (Result r : scanner) { + scanResults.add(r); + } + } + assertEquals(1, scanResults.size()); + Get g = new Get(Bytes.toBytes("row")); + g.setFilter(new FilterList()); + Result getResult = table.get(g); + Result scanResult = scanResults.get(0); + assertEquals(scanResult.rawCells().length, getResult.rawCells().length); + for (int i = 0; i != scanResult.rawCells().length; ++i) { + Cell scanCell = scanResult.rawCells()[i]; + Cell getCell = getResult.rawCells()[i]; + assertEquals(0, Bytes.compareTo(CellUtil.cloneRow(scanCell), CellUtil.cloneRow(getCell))); + assertEquals(0, + Bytes.compareTo(CellUtil.cloneFamily(scanCell), CellUtil.cloneFamily(getCell))); + assertEquals(0, + Bytes.compareTo(CellUtil.cloneQualifier(scanCell), CellUtil.cloneQualifier(getCell))); + assertEquals(0, + Bytes.compareTo(CellUtil.cloneValue(scanCell), CellUtil.cloneValue(getCell))); + } + } + } + + @TestTemplate + public void testSmallScan() throws Exception { + // Test Initialization. 
+ TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + // Insert one row each region + int insertNum = 10; + for (int i = 0; i < 10; i++) { + Put put = new Put(Bytes.toBytes("row" + String.format("%03d", i))); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + } + + // normal scan + try (ResultScanner scanner = table.getScanner(new Scan())) { + int count = 0; + for (Result r : scanner) { + assertFalse(r.isEmpty()); + count++; + } + assertEquals(insertNum, count); + } + + // small scan + Scan scan = new Scan().withStartRow(HConstants.EMPTY_START_ROW) + .withStopRow(HConstants.EMPTY_END_ROW, true); + scan.setReadType(ReadType.PREAD); + scan.setCaching(2); + try (ResultScanner scanner = table.getScanner(scan)) { + int count = 0; + for (Result r : scanner) { + assertFalse(r.isEmpty()); + count++; + } + assertEquals(insertNum, count); + } + } + } + + @TestTemplate + public void testSuperSimpleWithReverseScan() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + Put put = new Put(Bytes.toBytes("0-b11111-0000000000000000000")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(put); + put = new Put(Bytes.toBytes("0-b11111-0000000000000000002")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(put); + put = new Put(Bytes.toBytes("0-b11111-0000000000000000004")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(put); + put = new Put(Bytes.toBytes("0-b11111-0000000000000000006")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(put); + put = new Put(Bytes.toBytes("0-b11111-0000000000000000008")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(put); + put = new Put(Bytes.toBytes("0-b22222-0000000000000000001")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(put); + put = new Put(Bytes.toBytes("0-b22222-0000000000000000003")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + 
ht.put(put); + put = new Put(Bytes.toBytes("0-b22222-0000000000000000005")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(put); + put = new Put(Bytes.toBytes("0-b22222-0000000000000000007")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(put); + put = new Put(Bytes.toBytes("0-b22222-0000000000000000009")); + put.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(put); + Scan scan = new Scan().withStartRow(Bytes.toBytes("0-b11111-9223372036854775807")) + .withStopRow(Bytes.toBytes("0-b11111-0000000000000000000"), true); + scan.setReversed(true); + try (ResultScanner scanner = ht.getScanner(scan)) { + Result result = scanner.next(); + assertTrue(Bytes.equals(result.getRow(), Bytes.toBytes("0-b11111-0000000000000000008"))); + } + } + } + + @TestTemplate + public void testFiltersWithReverseScan() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + byte[][] ROWS = makeN(ROW, 10); + byte[][] QUALIFIERS = + { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), + Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), + Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), + Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), + Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; + for (int i = 0; i < 10; i++) { + Put put = new Put(ROWS[i]); + put.addColumn(FAMILY, QUALIFIERS[i], VALUE); + ht.put(put); + } + Scan scan = new Scan(); + scan.setReversed(true); + scan.addFamily(FAMILY); + Filter filter = + new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator("col[1-5]")); + scan.setFilter(filter); + try (ResultScanner scanner = ht.getScanner(scan)) { + int expectedIndex = 5; + for (Result result : scanner) { + assertEquals(1, result.size()); + Cell c = result.rawCells()[0]; + assertTrue(Bytes.equals(c.getRowArray(), c.getRowOffset(), c.getRowLength(), + ROWS[expectedIndex], 0, ROWS[expectedIndex].length)); + assertTrue( + Bytes.equals(c.getQualifierArray(), 
c.getQualifierOffset(), c.getQualifierLength(), + QUALIFIERS[expectedIndex], 0, QUALIFIERS[expectedIndex].length)); + expectedIndex--; + } + assertEquals(0, expectedIndex); + } + } + } + + @TestTemplate + public void testKeyOnlyFilterWithReverseScan() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + byte[][] ROWS = makeN(ROW, 10); + byte[][] QUALIFIERS = + { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), + Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), + Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), + Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), + Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; + for (int i = 0; i < 10; i++) { + Put put = new Put(ROWS[i]); + put.addColumn(FAMILY, QUALIFIERS[i], VALUE); + ht.put(put); + } + Scan scan = new Scan(); + scan.setReversed(true); + scan.addFamily(FAMILY); + Filter filter = new KeyOnlyFilter(true); + scan.setFilter(filter); + try (ResultScanner ignored = ht.getScanner(scan)) { + int count = 0; + for (Result result : ht.getScanner(scan)) { + assertEquals(1, result.size()); + assertEquals(Bytes.SIZEOF_INT, result.rawCells()[0].getValueLength()); + assertEquals(VALUE.length, Bytes.toInt(CellUtil.cloneValue(result.rawCells()[0]))); + count++; + } + assertEquals(10, count); + } + } + } + + /** + * Test simple table and non-existent row cases. 
+ */ + @TestTemplate + public void testSimpleMissingWithReverseScan() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + byte[][] ROWS = makeN(ROW, 4); + + // Try to get a row on an empty table + Scan scan = new Scan(); + scan.setReversed(true); + Result result = getSingleScanResult(ht, scan); + assertNullResult(result); + + scan = new Scan().withStartRow(ROWS[0]); + scan.setReversed(true); + result = getSingleScanResult(ht, scan); + assertNullResult(result); + + scan = new Scan().withStartRow(ROWS[0]).withStopRow(ROWS[1], true); + scan.setReversed(true); + result = getSingleScanResult(ht, scan); + assertNullResult(result); + + scan = new Scan(); + scan.setReversed(true); + scan.addFamily(FAMILY); + result = getSingleScanResult(ht, scan); + assertNullResult(result); + + scan = new Scan(); + scan.setReversed(true); + scan.addColumn(FAMILY, QUALIFIER); + result = getSingleScanResult(ht, scan); + assertNullResult(result); + + // Insert a row + Put put = new Put(ROWS[2]); + put.addColumn(FAMILY, QUALIFIER, VALUE); + ht.put(put); + + // Make sure we can scan the row + scan = new Scan(); + scan.setReversed(true); + result = getSingleScanResult(ht, scan); + assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); + + scan = new Scan().withStartRow(ROWS[3]).withStopRow(ROWS[0], true); + scan.setReversed(true); + result = getSingleScanResult(ht, scan); + assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); + + scan = new Scan().withStartRow(ROWS[2]).withStopRow(ROWS[1], true); + scan.setReversed(true); + result = getSingleScanResult(ht, scan); + assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); + + // Try to scan empty rows around it + // Introduced MemStore#shouldSeekForReverseScan to fix the following + scan = new Scan().withStartRow(ROWS[1]); + scan.setReversed(true); + result = getSingleScanResult(ht, scan); + assertNullResult(result); + } + } + + 
@TestTemplate + public void testNullWithReverseScan() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + // Null qualifier (should work) + Put put = new Put(ROW); + put.addColumn(FAMILY, null, VALUE); + ht.put(put); + scanTestNull(ht, ROW, FAMILY, VALUE, true); + Delete delete = new Delete(ROW); + delete.addColumns(FAMILY, null); + ht.delete(delete); + } + + // Use a new table + TableName newTableName = TableName.valueOf(tableName.toString() + "2"); + TEST_UTIL.createTable(newTableName, FAMILY); + try (Connection conn = getConnection(); Table ht = conn.getTable(newTableName)) { + // Empty qualifier, byte[0] instead of null (should work) + Put put = new Put(ROW); + put.addColumn(FAMILY, HConstants.EMPTY_BYTE_ARRAY, VALUE); + ht.put(put); + scanTestNull(ht, ROW, FAMILY, VALUE, true); + TEST_UTIL.flush(); + scanTestNull(ht, ROW, FAMILY, VALUE, true); + Delete delete = new Delete(ROW); + delete.addColumns(FAMILY, HConstants.EMPTY_BYTE_ARRAY); + ht.delete(delete); + // Null value + put = new Put(ROW); + put.addColumn(FAMILY, QUALIFIER, null); + ht.put(put); + Scan scan = new Scan(); + scan.setReversed(true); + scan.addColumn(FAMILY, QUALIFIER); + Result result = getSingleScanResult(ht, scan); + assertSingleResult(result, ROW, FAMILY, QUALIFIER, null); + } + } + + @TestTemplate + @SuppressWarnings("checkstyle:MethodLength") + public void testDeletesWithReverseScan() throws Exception { + byte[][] ROWS = makeNAscii(ROW, 6); + byte[][] FAMILIES = makeNAscii(FAMILY, 3); + byte[][] VALUES = makeN(VALUE, 5); + long[] ts = { 1000, 2000, 3000, 4000, 5000 }; + TEST_UTIL.createTable(tableName, FAMILIES, 3); + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + Put put = new Put(ROW); + put.addColumn(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]); + put.addColumn(FAMILIES[0], QUALIFIER, ts[1], VALUES[1]); + ht.put(put); + + Delete delete = new Delete(ROW); + 
delete.addFamily(FAMILIES[0], ts[0]); + ht.delete(delete); + + Scan scan = new Scan().withStartRow(ROW); + scan.setReversed(true); + scan.addFamily(FAMILIES[0]); + scan.readVersions(Integer.MAX_VALUE); + Result result = getSingleScanResult(ht, scan); + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[] { ts[1] }, + new byte[][] { VALUES[1] }, 0, 0); + + // Test delete latest version + put = new Put(ROW); + put.addColumn(FAMILIES[0], QUALIFIER, ts[4], VALUES[4]); + put.addColumn(FAMILIES[0], QUALIFIER, ts[2], VALUES[2]); + put.addColumn(FAMILIES[0], QUALIFIER, ts[3], VALUES[3]); + put.addColumn(FAMILIES[0], null, ts[4], VALUES[4]); + put.addColumn(FAMILIES[0], null, ts[2], VALUES[2]); + put.addColumn(FAMILIES[0], null, ts[3], VALUES[3]); + ht.put(put); + + delete = new Delete(ROW); + delete.addColumn(FAMILIES[0], QUALIFIER); // ts[4] + ht.delete(delete); + + scan = new Scan().withStartRow(ROW); + scan.setReversed(true); + scan.addColumn(FAMILIES[0], QUALIFIER); + scan.readVersions(Integer.MAX_VALUE); + result = getSingleScanResult(ht, scan); + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[] { ts[1], ts[2], ts[3] }, + new byte[][] { VALUES[1], VALUES[2], VALUES[3] }, 0, 2); + + // Test for HBASE-1847 + delete = new Delete(ROW); + delete.addColumn(FAMILIES[0], null); + ht.delete(delete); + + // Cleanup null qualifier + delete = new Delete(ROW); + delete.addColumns(FAMILIES[0], null); + ht.delete(delete); + + // Expected client behavior might be that you can re-put deleted values + // But alas, this is not to be. We can't put them back in either case. 
+ + put = new Put(ROW); + put.addColumn(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]); + put.addColumn(FAMILIES[0], QUALIFIER, ts[4], VALUES[4]); + ht.put(put); + + // The Scanner returns the previous values, the expected-naive-unexpected + // behavior + + scan = new Scan().withStartRow(ROW); + scan.setReversed(true); + scan.addFamily(FAMILIES[0]); + scan.readVersions(Integer.MAX_VALUE); + result = getSingleScanResult(ht, scan); + assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[] { ts[1], ts[2], ts[3] }, + new byte[][] { VALUES[1], VALUES[2], VALUES[3] }, 0, 2); + + // Test deleting an entire family from one row but not the other various + // ways + + put = new Put(ROWS[0]); + put.addColumn(FAMILIES[1], QUALIFIER, ts[0], VALUES[0]); + put.addColumn(FAMILIES[1], QUALIFIER, ts[1], VALUES[1]); + put.addColumn(FAMILIES[2], QUALIFIER, ts[2], VALUES[2]); + put.addColumn(FAMILIES[2], QUALIFIER, ts[3], VALUES[3]); + ht.put(put); + + put = new Put(ROWS[1]); + put.addColumn(FAMILIES[1], QUALIFIER, ts[0], VALUES[0]); + put.addColumn(FAMILIES[1], QUALIFIER, ts[1], VALUES[1]); + put.addColumn(FAMILIES[2], QUALIFIER, ts[2], VALUES[2]); + put.addColumn(FAMILIES[2], QUALIFIER, ts[3], VALUES[3]); + ht.put(put); + + put = new Put(ROWS[2]); + put.addColumn(FAMILIES[1], QUALIFIER, ts[0], VALUES[0]); + put.addColumn(FAMILIES[1], QUALIFIER, ts[1], VALUES[1]); + put.addColumn(FAMILIES[2], QUALIFIER, ts[2], VALUES[2]); + put.addColumn(FAMILIES[2], QUALIFIER, ts[3], VALUES[3]); + ht.put(put); + + delete = new Delete(ROWS[0]); + delete.addFamily(FAMILIES[2]); + ht.delete(delete); + + delete = new Delete(ROWS[1]); + delete.addColumns(FAMILIES[1], QUALIFIER); + ht.delete(delete); + + delete = new Delete(ROWS[2]); + delete.addColumn(FAMILIES[1], QUALIFIER); + delete.addColumn(FAMILIES[1], QUALIFIER); + delete.addColumn(FAMILIES[2], QUALIFIER); + ht.delete(delete); + + scan = new Scan().withStartRow(ROWS[0]); + scan.setReversed(true); + scan.addFamily(FAMILIES[1]); + 
scan.addFamily(FAMILIES[2]); + scan.readVersions(Integer.MAX_VALUE); + result = getSingleScanResult(ht, scan); + assertEquals(2, result.size(), "Expected 2 keys but received " + result.size()); + assertNResult(result, ROWS[0], FAMILIES[1], QUALIFIER, new long[] { ts[0], ts[1] }, + new byte[][] { VALUES[0], VALUES[1] }, 0, 1); + + scan = new Scan().withStartRow(ROWS[1]); + scan.setReversed(true); + scan.addFamily(FAMILIES[1]); + scan.addFamily(FAMILIES[2]); + scan.readVersions(Integer.MAX_VALUE); + result = getSingleScanResult(ht, scan); + assertEquals(2, result.size(), "Expected 2 keys but received " + result.size()); + + scan = new Scan().withStartRow(ROWS[2]); + scan.setReversed(true); + scan.addFamily(FAMILIES[1]); + scan.addFamily(FAMILIES[2]); + scan.readVersions(Integer.MAX_VALUE); + result = getSingleScanResult(ht, scan); + assertEquals(1, result.size()); + assertNResult(result, ROWS[2], FAMILIES[2], QUALIFIER, new long[] { ts[2] }, + new byte[][] { VALUES[2] }, 0, 0); + + // Test if we delete the family first in one row (HBASE-1541) + + delete = new Delete(ROWS[3]); + delete.addFamily(FAMILIES[1]); + ht.delete(delete); + + put = new Put(ROWS[3]); + put.addColumn(FAMILIES[2], QUALIFIER, VALUES[0]); + ht.put(put); + + put = new Put(ROWS[4]); + put.addColumn(FAMILIES[1], QUALIFIER, VALUES[1]); + put.addColumn(FAMILIES[2], QUALIFIER, VALUES[2]); + ht.put(put); + + scan = new Scan().withStartRow(ROWS[4]); + scan.setReversed(true); + scan.addFamily(FAMILIES[1]); + scan.addFamily(FAMILIES[2]); + scan.readVersions(Integer.MAX_VALUE); + try (ResultScanner scanner = ht.getScanner(scan)) { + result = scanner.next(); + assertEquals(2, result.size(), "Expected 2 keys but received " + result.size()); + assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]), ROWS[4])); + assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[1]), ROWS[4])); + assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[0]), VALUES[1])); + 
assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[1]), VALUES[2])); + result = scanner.next(); + assertEquals(1, result.size(), "Expected 1 key but received " + result.size()); + assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]), ROWS[3])); + assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[0]), VALUES[0])); + } + } + } + + /** + * Tests reversed scan under multi regions + */ + @TestTemplate + public void testReversedScanUnderMultiRegions() throws Exception { + // Test Initialization. + byte[] maxByteArray = ConnectionUtils.MAX_BYTE_ARRAY; + byte[][] splitRows = new byte[][] { Bytes.toBytes("005"), + Bytes.add(Bytes.toBytes("005"), Bytes.multiple(maxByteArray, 16)), Bytes.toBytes("006"), + Bytes.add(Bytes.toBytes("006"), Bytes.multiple(maxByteArray, 8)), Bytes.toBytes("007"), + Bytes.add(Bytes.toBytes("007"), Bytes.multiple(maxByteArray, 4)), Bytes.toBytes("008"), + Bytes.multiple(maxByteArray, 2) }; + TEST_UTIL.createTable(tableName, FAMILY, splitRows); + TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + try (RegionLocator l = conn.getRegionLocator(tableName)) { + assertEquals(splitRows.length + 1, l.getAllRegionLocations().size()); + } + // Insert one row each region + int insertNum = splitRows.length; + for (byte[] splitRow : splitRows) { + Put put = new Put(splitRow); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + } + + // scan forward + try (ResultScanner scanner = table.getScanner(new Scan())) { + int count = 0; + for (Result r : scanner) { + assertFalse(r.isEmpty()); + count++; + } + assertEquals(insertNum, count); + } + + // scan backward + Scan scan = new Scan(); + scan.setReversed(true); + try (ResultScanner scanner = table.getScanner(scan)) { + int count = 0; + byte[] lastRow = null; + for (Result r : scanner) { + assertFalse(r.isEmpty()); + count++; + byte[] thisRow = r.getRow(); + if (lastRow != null) { + 
assertTrue(Bytes.compareTo(thisRow, lastRow) < 0, "Error scan order, last row= " + + Bytes.toString(lastRow) + ",this row=" + Bytes.toString(thisRow)); + } + lastRow = thisRow; + } + assertEquals(insertNum, count); + } + } + } + + /** + * Tests reversed scan under multi regions + */ + @TestTemplate + public void testSmallReversedScanUnderMultiRegions() throws Exception { + // Test Initialization. + byte[][] splitRows = new byte[][] { Bytes.toBytes("000"), Bytes.toBytes("002"), + Bytes.toBytes("004"), Bytes.toBytes("006"), Bytes.toBytes("008"), Bytes.toBytes("010") }; + TEST_UTIL.createTable(tableName, FAMILY, splitRows); + TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + try (RegionLocator l = conn.getRegionLocator(tableName)) { + assertEquals(splitRows.length + 1, l.getAllRegionLocations().size()); + } + for (byte[] splitRow : splitRows) { + Put put = new Put(splitRow); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + + byte[] nextRow = Bytes.copy(splitRow); + nextRow[nextRow.length - 1]++; + + put = new Put(nextRow); + put.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(put); + } + + // scan forward + try (ResultScanner scanner = table.getScanner(new Scan())) { + int count = 0; + for (Result r : scanner) { + assertTrue(!r.isEmpty()); + count++; + } + assertEquals(12, count); + } + + reverseScanTest(table, ReadType.STREAM); + reverseScanTest(table, ReadType.PREAD); + reverseScanTest(table, ReadType.DEFAULT); + } + } + + private void reverseScanTest(Table table, ReadType readType) throws IOException { + // scan backward + Scan scan = new Scan(); + scan.setReversed(true); + try (ResultScanner scanner = table.getScanner(scan)) { + int count = 0; + byte[] lastRow = null; + for (Result r : scanner) { + assertTrue(!r.isEmpty()); + count++; + byte[] thisRow = r.getRow(); + if (lastRow != null) { + assertTrue(Bytes.compareTo(thisRow, lastRow) < 0, "Error scan order, last 
row= " + + Bytes.toString(lastRow) + ",this row=" + Bytes.toString(thisRow)); + } + lastRow = thisRow; + } + assertEquals(12, count); + } + + scan = new Scan(); + scan.setReadType(readType); + scan.setReversed(true); + scan.withStartRow(Bytes.toBytes("002")); + try (ResultScanner scanner = table.getScanner(scan)) { + int count = 0; + byte[] lastRow = null; + for (Result r : scanner) { + assertTrue(!r.isEmpty()); + count++; + byte[] thisRow = r.getRow(); + if (lastRow != null) { + assertTrue(Bytes.compareTo(thisRow, lastRow) < 0, "Error scan order, last row= " + + Bytes.toString(lastRow) + ",this row=" + Bytes.toString(thisRow)); + } + lastRow = thisRow; + } + assertEquals(3, count); // 000 001 002 + } + + scan = new Scan(); + scan.setReadType(readType); + scan.setReversed(true); + scan.withStartRow(Bytes.toBytes("002")); + scan.withStopRow(Bytes.toBytes("000")); + try (ResultScanner scanner = table.getScanner(scan)) { + int count = 0; + byte[] lastRow = null; + for (Result r : scanner) { + assertFalse(r.isEmpty()); + count++; + byte[] thisRow = r.getRow(); + if (lastRow != null) { + assertTrue(Bytes.compareTo(thisRow, lastRow) < 0, "Error scan order, last row= " + + Bytes.toString(lastRow) + ",this row=" + Bytes.toString(thisRow)); + } + lastRow = thisRow; + } + assertEquals(2, count); // 001 002 + } + + scan = new Scan(); + scan.setReadType(readType); + scan.setReversed(true); + scan.withStartRow(Bytes.toBytes("001")); + try (ResultScanner scanner = table.getScanner(scan)) { + int count = 0; + byte[] lastRow = null; + for (Result r : scanner) { + assertFalse(r.isEmpty()); + count++; + byte[] thisRow = r.getRow(); + if (lastRow != null) { + assertTrue(Bytes.compareTo(thisRow, lastRow) < 0, "Error scan order, last row= " + + Bytes.toString(lastRow) + ",this row=" + Bytes.toString(thisRow)); + } + lastRow = thisRow; + } + assertEquals(2, count); // 000 001 + } + + scan = new Scan(); + scan.setReadType(readType); + scan.setReversed(true); + 
scan.withStartRow(Bytes.toBytes("000")); + try (ResultScanner scanner = table.getScanner(scan)) { + int count = 0; + byte[] lastRow = null; + for (Result r : scanner) { + assertFalse(r.isEmpty()); + count++; + byte[] thisRow = r.getRow(); + if (lastRow != null) { + assertTrue(Bytes.compareTo(thisRow, lastRow) < 0, "Error scan order, last row= " + + Bytes.toString(lastRow) + ",this row=" + Bytes.toString(thisRow)); + } + lastRow = thisRow; + } + assertEquals(1, count); // 000 + } + + scan = new Scan(); + scan.setReadType(readType); + scan.setReversed(true); + scan.withStartRow(Bytes.toBytes("006")); + scan.withStopRow(Bytes.toBytes("002")); + try (ResultScanner scanner = table.getScanner(scan)) { + int count = 0; + byte[] lastRow = null; + for (Result r : scanner) { + assertFalse(r.isEmpty()); + count++; + byte[] thisRow = r.getRow(); + if (lastRow != null) { + assertTrue(Bytes.compareTo(thisRow, lastRow) < 0, "Error scan order, last row= " + + Bytes.toString(lastRow) + ",this row=" + Bytes.toString(thisRow)); + } + lastRow = thisRow; + } + assertEquals(4, count); // 003 004 005 006 + } + } + + @TestTemplate + public void testFilterAllRecords() throws IOException { + Scan scan = new Scan(); + scan.setBatch(1); + scan.setCaching(1); + // Filter out any records + scan.setFilter(new FilterList(new FirstKeyOnlyFilter(), new InclusiveStopFilter(new byte[0]))); + try (Connection conn = getConnection(); + Table table = conn.getTable(TableName.META_TABLE_NAME)) { + try (ResultScanner s = table.getScanner(scan)) { + assertNull(s.next()); + } + } + } + + @TestTemplate + public void testCellSizeLimit() throws IOException { + TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) + .setValue(HRegion.HBASE_MAX_CELL_SIZE_KEY, Integer.toString(10 * 1024)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + try (Connection conn = getConnection()) { + try (Admin admin = conn.getAdmin()) { + admin.createTable(tableDescriptor); + } + try 
(Table t = conn.getTable(tableName)) { + // Will succeed + t.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(0L))); + t.increment(new Increment(ROW).addColumn(FAMILY, QUALIFIER, 1L)); + + // Will succeed + t.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, new byte[9 * 1024])); + + // Will fail + assertThrows(IOException.class, + () -> t.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, new byte[10 * 1024])), + "Oversize cell failed to trigger exception"); + assertThrows(IOException.class, + () -> t.append(new Append(ROW).addColumn(FAMILY, QUALIFIER, new byte[2 * 1024])), + "Oversize cell failed to trigger exception"); + } + } + } + + @TestTemplate + public void testCellSizeNoLimit() throws IOException { + + TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) + .setValue(HRegion.HBASE_MAX_CELL_SIZE_KEY, Integer.toString(0)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); + try (Connection conn = getConnection()) { + try (Admin admin = conn.getAdmin()) { + admin.createTable(tableDescriptor); + } + // Will succeed + try (Table ht = conn.getTable(tableName)) { + ht.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, + new byte[HRegion.DEFAULT_MAX_CELL_SIZE - 1024])); + ht.append(new Append(ROW).addColumn(FAMILY, QUALIFIER, new byte[1024 + 1])); + } + } + } + + @TestTemplate + public void testDeleteSpecifiedVersionOfSpecifiedColumn() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY, 5); + byte[][] VALUES = makeN(VALUE, 5); + long[] ts = { 1000, 2000, 3000, 4000, 5000 }; + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + Put put = new Put(ROW); + // Put version 1000,2000,3000,4000 of column FAMILY:QUALIFIER + for (int t = 0; t < 4; t++) { + put.addColumn(FAMILY, QUALIFIER, ts[t], VALUES[t]); + } + ht.put(put); + + Delete delete = new Delete(ROW); + // Delete version 3000 of column FAMILY:QUALIFIER + delete.addColumn(FAMILY, QUALIFIER, ts[2]); + ht.delete(delete); + + Get get = 
new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(Integer.MAX_VALUE); + Result result = ht.get(get); + // verify version 1000,2000,4000 remains for column FAMILY:QUALIFIER + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[1], ts[3] }, + new byte[][] { VALUES[0], VALUES[1], VALUES[3] }, 0, 2); + + delete = new Delete(ROW); + // Delete a version 5000 of column FAMILY:QUALIFIER which didn't exist + delete.addColumn(FAMILY, QUALIFIER, ts[4]); + ht.delete(delete); + + get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(Integer.MAX_VALUE); + result = ht.get(get); + // verify version 1000,2000,4000 remains for column FAMILY:QUALIFIER + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[1], ts[3] }, + new byte[][] { VALUES[0], VALUES[1], VALUES[3] }, 0, 2); + } + } + + @TestTemplate + public void testDeleteLatestVersionOfSpecifiedColumn() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY, 5); + byte[][] VALUES = makeN(VALUE, 5); + long[] ts = { 1000, 2000, 3000, 4000, 5000 }; + try (Connection conn = getConnection(); Table ht = conn.getTable(tableName)) { + Put put = new Put(ROW); + // Put version 1000,2000,3000,4000 of column FAMILY:QUALIFIER + for (int t = 0; t < 4; t++) { + put.addColumn(FAMILY, QUALIFIER, ts[t], VALUES[t]); + } + ht.put(put); + + Delete delete = new Delete(ROW); + // Delete latest version of column FAMILY:QUALIFIER + delete.addColumn(FAMILY, QUALIFIER); + ht.delete(delete); + + Get get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(Integer.MAX_VALUE); + Result result = ht.get(get); + // verify version 1000,2000,3000 remains for column FAMILY:QUALIFIER + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[1], ts[2] }, + new byte[][] { VALUES[0], VALUES[1], VALUES[2] }, 0, 2); + + delete = new Delete(ROW); + // Delete two latest version of column FAMILY:QUALIFIER + delete.addColumn(FAMILY, QUALIFIER); + 
delete.addColumn(FAMILY, QUALIFIER); + ht.delete(delete); + + get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(Integer.MAX_VALUE); + result = ht.get(get); + // verify version 1000 remains for column FAMILY:QUALIFIER + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0] }, + new byte[][] { VALUES[0] }, 0, 0); + + put = new Put(ROW); + // Put a version 5000 of column FAMILY:QUALIFIER + put.addColumn(FAMILY, QUALIFIER, ts[4], VALUES[4]); + ht.put(put); + + get = new Get(ROW); + get.addColumn(FAMILY, QUALIFIER); + get.readVersions(Integer.MAX_VALUE); + result = ht.get(get); + // verify version 1000,5000 remains for column FAMILY:QUALIFIER + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[4] }, + new byte[][] { VALUES[0], VALUES[4] }, 0, 1); + } + } + + /** + * Test for HBASE-17125 + */ + @TestTemplate + public void testReadWithFilter() throws Exception { + TEST_UTIL.createTable(tableName, FAMILY, 3); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + byte[] VALUEA = Bytes.toBytes("value-a"); + byte[] VALUEB = Bytes.toBytes("value-b"); + long[] ts = { 1000, 2000, 3000, 4000 }; + + Put put = new Put(ROW); + // Put version 1000,2000,3000,4000 of column FAMILY:QUALIFIER + for (int t = 0; t <= 3; t++) { + if (t <= 1) { + put.addColumn(FAMILY, QUALIFIER, ts[t], VALUEA); + } else { + put.addColumn(FAMILY, QUALIFIER, ts[t], VALUEB); + } + } + table.put(put); + + Scan scan = new Scan() + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(3); + Result result = getSingleScanResult(table, scan); + // ts[0] has gone from user view. 
Only ts[1] still has value-a and passes the filter + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); + + Get get = new Get(ROW) + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(3); + result = table.get(get); + // ts[0] has gone from user view. Only ts[1] still has value-a and passes the filter + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); + + // Test with max versions 1, it should still read ts[1] + scan = new Scan() + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(1); + result = getSingleScanResult(table, scan); + // ts[0] has gone from user view. Only ts[1] still has value-a and passes the filter + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); + + // Test with max versions 1, it should still read ts[1] + get = new Get(ROW) + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(1); + result = table.get(get); + // ts[0] has gone from user view. Only ts[1] still has value-a and passes the filter + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); + + // Test with max versions 5, it should still read ts[1] + scan = new Scan() + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(5); + result = getSingleScanResult(table, scan); + // ts[0] has gone from user view. 
Only ts[1] still has value-a and passes the filter + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); + + // Test with max versions 5, it should still read ts[1] + get = new Get(ROW) + .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) + .readVersions(5); + result = table.get(get); + // ts[0] has gone from user view. Only ts[1] still has value-a and passes the filter + assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, + 0, 0); + } + } + + @TestTemplate + public void testCellUtilTypeMethods() throws IOException { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table table = conn.getTable(tableName)) { + final byte[] row = Bytes.toBytes("p"); + Put p = new Put(row); + p.addColumn(FAMILY, QUALIFIER, VALUE); + table.put(p); + + try (ResultScanner scanner = table.getScanner(new Scan())) { + Result result = scanner.next(); + assertNotNull(result); + CellScanner cs = result.cellScanner(); + assertTrue(cs.advance()); + Cell c = cs.current(); + assertTrue(CellUtil.isPut(c)); + assertFalse(CellUtil.isDelete(c)); + assertFalse(cs.advance()); + assertNull(scanner.next()); + } + + Delete d = new Delete(row); + d.addColumn(FAMILY, QUALIFIER); + table.delete(d); + + Scan scan = new Scan(); + scan.setRaw(true); + try (ResultScanner scanner = table.getScanner(scan)) { + Result result = scanner.next(); + assertNotNull(result); + CellScanner cs = result.cellScanner(); + assertTrue(cs.advance()); + + // First cell should be the delete (masking the Put) + Cell c = cs.current(); + assertTrue(CellUtil.isDelete(c), "Cell should be a Delete: " + c); + assertFalse(CellUtil.isPut(c), "Cell should not be a Put: " + c); + + // Second cell should be the original Put + assertTrue(cs.advance()); + c = cs.current(); + assertFalse(CellUtil.isDelete(c), "Cell should not be a Delete: " + c); + assertTrue(CellUtil.isPut(c), "Cell should 
be a Put: " + c); + + // No more cells in this row + assertFalse(cs.advance()); + + // No more results in this scan + assertNull(scanner.next()); + } + } + } + + @TestTemplate + public void testCreateTableWithZeroRegionReplicas() throws Exception { + TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))) + .setRegionReplication(0).build(); + + try (Connection conn = getConnection(); Admin admin = conn.getAdmin()) { + assertThrows(DoNotRetryIOException.class, () -> admin.createTable(desc)); + } + } + + @TestTemplate + public void testModifyTableWithZeroRegionReplicas() throws Exception { + TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build(); + TableDescriptor newDesc = + TableDescriptorBuilder.newBuilder(desc).setRegionReplication(0).build(); + try (Connection conn = getConnection(); Admin admin = conn.getAdmin()) { + admin.createTable(desc); + assertThrows(DoNotRetryIOException.class, () -> admin.modifyTable(newDesc)); + } + } + + @TestTemplate + public void testModifyTableWithMemstoreData() throws Exception { + createTableAndValidateTableSchemaModification(tableName, true); + } + + @TestTemplate + public void testDeleteCFWithMemstoreData() throws Exception { + createTableAndValidateTableSchemaModification(tableName, false); + } + + /** + * Create table and validate online schema modification + * @param tableName Table name + * @param modifyTable Modify table if true otherwise delete column family + * @throws IOException in case of failures + */ + private void createTableAndValidateTableSchemaModification(TableName tableName, + boolean modifyTable) throws Exception { + try (Connection conn = getConnection(); Admin admin = conn.getAdmin()) { + // Create table with two Cfs + byte[] cf1 = Bytes.toBytes("cf1"); + byte[] cf2 = Bytes.toBytes("cf2"); + TableDescriptor tableDesc = 
TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build(); + admin.createTable(tableDesc); + + Table t = TEST_UTIL.getConnection().getTable(tableName); + // Insert few records and flush the table + t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val1"))); + t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2"))); + admin.flush(tableName); + Path tableDir = CommonFSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), tableName); + List regionDirs = FSUtils.getRegionDirs(TEST_UTIL.getTestFileSystem(), tableDir); + assertEquals(1, regionDirs.size()); + List familyDirs = + FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0)); + assertEquals(2, familyDirs.size()); + + // Insert record but dont flush the table + t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val2"))); + t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2"))); + + if (modifyTable) { + tableDesc = TableDescriptorBuilder.newBuilder(tableDesc).removeColumnFamily(cf2).build(); + admin.modifyTable(tableDesc); + } else { + admin.deleteColumnFamily(tableName, cf2); + } + // After table modification or delete family there should be only one CF in FS + familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0)); + assertEquals(1, familyDirs.size(), "CF dir count should be 1, but was " + familyDirs.size()); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestBase.java similarity index 93% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestBase.java index fbd6e25c0761..8e199e574fb0 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestBase.java @@ -26,6 +26,7 @@ import java.util.Arrays; import java.util.Iterator; import java.util.List; +import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.StartTestingClusterOption; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.filter.Filter; @@ -43,29 +45,21 @@ import org.apache.hadoop.hbase.filter.RowFilter; import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; import org.apache.hadoop.hbase.filter.WhileMatchFilter; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.NonRepeatedEnvironmentEdge; import org.apache.hadoop.hbase.util.TableDescriptorChecker; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestInfo; +import org.junit.jupiter.params.provider.Arguments; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; - -/** - * Base for TestFromClientSide* classes. Has common defines and utility used by all. 
- */ -@Category({ LargeTests.class, ClientTests.class }) -@SuppressWarnings("deprecation") -@RunWith(Parameterized.class) -class FromClientSideBase { +public class FromClientSideTestBase { private static final Logger LOG = LoggerFactory.getLogger(FromClientSideBase.class); - static HBaseTestingUtil TEST_UTIL; + protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); static byte[] ROW = Bytes.toBytes("testRow"); static byte[] FAMILY = Bytes.toBytes("testFamily"); static final byte[] INVALID_FAMILY = Bytes.toBytes("invalidTestFamily"); @@ -73,38 +67,39 @@ class FromClientSideBase { static byte[] VALUE = Bytes.toBytes("testValue"); static int SLAVES = 1; - // To keep the child classes happy. - FromClientSideBase() { + protected Class registryImpl; + protected int numHedgedReqs; + + protected TableName tableName; + + protected FromClientSideTestBase(Class registryImpl, + int numHedgedReqs) { + this.registryImpl = registryImpl; + this.numHedgedReqs = numHedgedReqs; } - /** - * JUnit does not provide an easy way to run a hook after each parameterized run. Without that - * there is no easy way to restart the test cluster after each parameterized run. Annotation - * BeforeParam does not work either because it runs before parameterization and hence does not - * have access to the test parameters (which is weird). This *hack* checks if the current instance - * of test cluster configuration has the passed parameterized configs. In such a case, we can just - * reuse the cluster for test and do not need to initialize from scratch. While this is a hack, it - * saves a ton of time for the full test and de-flakes it. 
- */ - protected static boolean isSameParameterizedCluster(Class registryImpl, int numHedgedReqs) { - if (TEST_UTIL == null) { - return false; - } - Configuration conf = TEST_UTIL.getConfiguration(); - Class confClass = conf.getClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - ZKConnectionRegistry.class); - int hedgedReqConfig = conf.getInt(MasterRegistry.MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, - AbstractRpcBasedConnectionRegistry.HEDGED_REQS_FANOUT_DEFAULT); - return confClass.getName().equals(registryImpl.getName()) && numHedgedReqs == hedgedReqConfig; + @BeforeEach + public void setUp(TestInfo testInfo) { + tableName = TableName.valueOf(testInfo.getTestMethod().get().getName() + + testInfo.getDisplayName().replaceAll("[^A-Za-z0-9_]", "_")); } - protected static final void initialize(Class registryImpl, - int numHedgedReqs, Class... cps) throws Exception { - // initialize() is called for every unit test, however we only want to reset the cluster state - // at the end of every parameterized run. - if (isSameParameterizedCluster(registryImpl, numHedgedReqs)) { - return; + @AfterEach + public void tearDown() throws Exception { + for (TableDescriptor htd : TEST_UTIL.getAdmin().listTableDescriptors()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + TEST_UTIL.deleteTable(htd.getTableName()); } + } + + @SuppressWarnings("deprecation") + public static Stream parameters() { + return Stream.of(Arguments.of(RpcConnectionRegistry.class, 1), + Arguments.of(RpcConnectionRegistry.class, 2), Arguments.of(MasterRegistry.class, 1), + Arguments.of(MasterRegistry.class, 2), Arguments.of(ZKConnectionRegistry.class, 1)); + } + + protected static final void initialize(Class... cps) throws Exception { // Uncomment the following lines if more verbosity is needed for // debugging (see HBASE-12285 for details). 
// ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); @@ -112,29 +107,32 @@ protected static final void initialize(Class regis // ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); // make sure that we do not get the same ts twice, see HBASE-19731 for more details. EnvironmentEdgeManager.injectEdge(new NonRepeatedEnvironmentEdge()); - if (TEST_UTIL != null) { - // We reached end of a parameterized run, clean up. - TEST_UTIL.shutdownMiniCluster(); - } - TEST_UTIL = new HBaseTestingUtil(); Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, Arrays.stream(cps).map(Class::getName).toArray(String[]::new)); conf.setBoolean(TableDescriptorChecker.TABLE_SANITY_CHECKS, true); // enable for below tests + // Use multiple masters for support hedged reads in registry + TEST_UTIL.startMiniCluster( + StartTestingClusterOption.builder().numMasters(3).numRegionServers(SLAVES).build()); + } + + @AfterAll + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @SuppressWarnings("deprecation") + protected final Configuration getClientConf() { + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, registryImpl, ConnectionRegistry.class); - Preconditions.checkArgument(numHedgedReqs > 0); + conf.setInt(RpcConnectionRegistry.HEDGED_REQS_FANOUT_KEY, numHedgedReqs); conf.setInt(MasterRegistry.MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, numHedgedReqs); - StartTestingClusterOption.Builder builder = StartTestingClusterOption.builder(); - // Multiple masters needed only when hedged reads for master registry are enabled. - builder.numMasters(numHedgedReqs > 1 ? 
3 : 1).numRegionServers(SLAVES); - TEST_UTIL.startMiniCluster(builder.build()); + return conf; } - protected static void afterClass() throws Exception { - if (TEST_UTIL != null) { - TEST_UTIL.shutdownMiniCluster(); - } + protected final Connection getConnection() throws IOException { + return ConnectionFactory.createConnection(getClientConf()); } protected void deleteColumns(Table ht, String value, String keyPrefix) throws IOException { @@ -274,10 +272,9 @@ private List waitOnSplit(final Table t) throws IOException { } protected Result getSingleScanResult(Table ht, Scan scan) throws IOException { - ResultScanner scanner = ht.getScanner(scan); - Result result = scanner.next(); - scanner.close(); - return result; + try (ResultScanner scanner = ht.getScanner(scan)) { + return scanner.next(); + } } byte[][] makeNAscii(byte[] base, int n) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestFilterAcrossMultipleRegions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestFilterAcrossMultipleRegions.java new file mode 100644 index 000000000000..a5d3acf50a4a --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestFilterAcrossMultipleRegions.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.apache.hadoop.hbase.HBaseTestingUtil.countRows; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.IOException; +import java.util.List; +import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.HRegionLocation; +import org.junit.jupiter.api.TestTemplate; + +public class FromClientSideTestFilterAcrossMultipleRegions extends FromClientSideTestBase { + + protected FromClientSideTestFilterAcrossMultipleRegions( + Class registryImpl, int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } + + /** + * Test filters when multiple regions. It does counts. Needs eye-balling of logs to ensure that + * we're not scanning more regions that we're supposed to. Related to the TestFilterAcrossRegions + * over in the o.a.h.h.filter package. + */ + @TestTemplate + public void testFilterAcrossMultipleRegions() throws IOException { + TEST_UTIL.createTable(tableName, FAMILY); + try (Connection conn = getConnection(); Table t = conn.getTable(tableName)) { + int rowCount = TEST_UTIL.loadTable(t, FAMILY, false); + assertRowCount(t, rowCount); + // Split the table. Should split on a reasonable key; 'lqj' + List regions = splitTable(t); + assertRowCount(t, rowCount); + // Get end key of first region. + byte[] endKey = regions.get(0).getRegion().getEndKey(); + // Count rows with a filter that stops us before passed 'endKey'. + // Should be count of rows in first region. + int endKeyCount = countRows(t, createScanWithRowFilter(endKey)); + assertTrue(endKeyCount < rowCount); + + // How do I know I did not got to second region? Thats tough. Can't really + // do that in client-side region test. I verified by tracing in debugger. 
+ // I changed the messages that come out when set to DEBUG so should see + // when scanner is done. Says "Finished with scanning..." with region name. + // Check that its finished in right region. + + // New test. Make it so scan goes into next region by one and then two. + // Make sure count comes out right. + byte[] key = new byte[] { endKey[0], endKey[1], (byte) (endKey[2] + 1) }; + int plusOneCount = countRows(t, createScanWithRowFilter(key)); + assertEquals(endKeyCount + 1, plusOneCount); + key = new byte[] { endKey[0], endKey[1], (byte) (endKey[2] + 2) }; + int plusTwoCount = countRows(t, createScanWithRowFilter(key)); + assertEquals(endKeyCount + 2, plusTwoCount); + + // New test. Make it so I scan one less than endkey. + key = new byte[] { endKey[0], endKey[1], (byte) (endKey[2] - 1) }; + int minusOneCount = countRows(t, createScanWithRowFilter(key)); + assertEquals(endKeyCount - 1, minusOneCount); + // For above test... study logs. Make sure we do "Finished with scanning.." + // in first region and that we do not fall into the next region. + + key = new byte[] { 'a', 'a', 'a' }; + int countBBB = countRows(t, createScanWithRowFilter(key, null, CompareOperator.EQUAL)); + assertEquals(1, countBBB); + + int countGreater = + countRows(t, createScanWithRowFilter(endKey, null, CompareOperator.GREATER_OR_EQUAL)); + // Because started at start of table. 
+ assertEquals(0, countGreater); + countGreater = + countRows(t, createScanWithRowFilter(endKey, endKey, CompareOperator.GREATER_OR_EQUAL)); + assertEquals(rowCount - endKeyCount, countGreater); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientDataStructureMisc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientDataStructureMisc.java new file mode 100644 index 000000000000..b2cdcde2a545 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientDataStructureMisc.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.IOException; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +@Tag(ClientTests.TAG) +@Tag(SmallTests.TAG) +public class TestClientDataStructureMisc { + + @Test + public void testAddKeyValue() { + final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); + final byte[] value = Bytes.toBytes("abcd"); + final byte[] row1 = Bytes.toBytes("row1"); + final byte[] row2 = Bytes.toBytes("row2"); + byte[] qualifier = Bytes.toBytes("qf1"); + Put put = new Put(row1); + + // Adding KeyValue with the same row + KeyValue kv = new KeyValue(row1, CONTENTS_FAMILY, qualifier, value); + boolean ok = true; + try { + put.add(kv); + } catch (IOException e) { + ok = false; + } + assertTrue(ok); + + // Adding KeyValue with the different row + kv = new KeyValue(row2, CONTENTS_FAMILY, qualifier, value); + ok = false; + try { + put.add(kv); + } catch (IOException e) { + ok = true; + } + assertTrue(ok); + } + + /** + * For HBASE-2156 + */ + @Test + public void testScanVariableReuse() { + byte[] family = Bytes.toBytes("family"); + byte[] qual = Bytes.toBytes("qual"); + Scan scan = new Scan(); + scan.addFamily(family); + scan.addColumn(family, qual); + + assertEquals(1, scan.getFamilyMap().get(family).size()); + + scan = new Scan(); + scan.addFamily(family); + + assertNull(scan.getFamilyMap().get(family)); + 
assertTrue(scan.getFamilyMap().containsKey(family)); + } + + @Test + public void testNegativeTimestamp() throws IOException { + IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, + () -> new Put(Bytes.toBytes("row"), -1), "Negative timestamps should not have been allowed"); + assertThat(ex.getMessage(), containsString("negative")); + + ex = assertThrows( + IllegalArgumentException.class, () -> new Put(Bytes.toBytes("row")) + .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), -1, Bytes.toBytes("v")), + "Negative timestamps should not have been allowed"); + assertThat(ex.getMessage(), containsString("negative")); + + ex = assertThrows(IllegalArgumentException.class, () -> new Delete(Bytes.toBytes("row"), -1), + "Negative timestamps should not have been allowed"); + assertThat(ex.getMessage(), containsString("negative")); + + ex = assertThrows(IllegalArgumentException.class, + () -> new Delete(Bytes.toBytes("row")).addFamily(Bytes.toBytes("f"), -1), + "Negative timestamps should not have been allowed"); + assertThat(ex.getMessage(), containsString("negative")); + + ex = assertThrows(IllegalArgumentException.class, () -> new Scan().setTimeRange(-1, 1), + "Negative timestamps should not have been allowed"); + assertThat(ex.getMessage(), containsString("negative")); + + // KeyValue should allow negative timestamps for backwards compat. 
Otherwise, if the user + // already has negative timestamps in cluster data, HBase won't be able to handle that + new KeyValue(Bytes.toBytes(42), Bytes.toBytes(42), Bytes.toBytes(42), -1, Bytes.toBytes(42)); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionReconnect.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionReconnect.java new file mode 100644 index 000000000000..0c4639e44afa --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionReconnect.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.stream.Stream; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ClusterMetrics.Option; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.SingleProcessHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; + +/** + * Test of that unmanaged HConnections are able to reconnect properly (see HBASE-5058) + */ +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}") +public class TestConnectionReconnect { + + private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); + + private static TableName NAME = TableName.valueOf("reconnect"); + + private Class registryImpl; + + public TestConnectionReconnect(Class registryImpl) { + this.registryImpl = registryImpl; + } + + @SuppressWarnings("deprecation") + public static Stream parameters() { + return Stream.of(Arguments.of(RpcConnectionRegistry.class), + Arguments.of(ZKConnectionRegistry.class)); + } + + @BeforeAll + public static void setUpBeforeAll() throws Exception { + UTIL.startMiniCluster(1); + UTIL.createTable(NAME, HConstants.CATALOG_FAMILY); + UTIL.waitTableAvailable(NAME); + } + + public static void tearDownAfterAll() throws Exception { + UTIL.shutdownMiniCluster(); + } + + private Connection 
getConnection() throws IOException { + Configuration conf = new Configuration(UTIL.getConfiguration()); + conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, registryImpl, + ConnectionRegistry.class); + conf.set(RpcConnectionRegistry.BOOTSTRAP_NODES, + UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName().getAddress().toString()); + return ConnectionFactory.createConnection(conf); + } + + @TestTemplate + public void testReconnect() throws Exception { + try (Connection conn = getConnection()) { + try (Table t = conn.getTable(NAME); Admin admin = conn.getAdmin()) { + assertTrue(admin.tableExists(NAME)); + assertTrue(t.get(new Get(Bytes.toBytes(0))).isEmpty()); + } + + // stop the master + SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster(); + + cluster.stopMaster(0, false); + cluster.waitOnMaster(0); + + // start up a new master + cluster.startMaster(); + assertTrue(cluster.waitForActiveAndReadyMaster()); + + // test that the same unmanaged connection works with a new + // Admin and can connect to the new master; + try (Admin admin = conn.getAdmin()) { + assertTrue(admin.tableExists(NAME)); + assertEquals( + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().size(), + 1); + } + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide1.java new file mode 100644 index 000000000000..edaf49b5fd8d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide1.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; + +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSide1 extends FromClientSideTest1 { + + public TestFromClientSide1(Class registryImpl, int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } + + @BeforeAll + public static void setUpBeforeClass() throws Exception { + initialize(MultiRowMutationEndpoint.class); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide1WithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide1WithCoprocessor.java new file mode 100644 index 000000000000..ced7185f59c0 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide1WithCoprocessor.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; + +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSide1WithCoprocessor extends FromClientSideTest1 { + + public TestFromClientSide1WithCoprocessor(Class registryImpl, + int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } + + @BeforeAll + public static void setUpBeforeClass() throws Exception { + initialize(NoOpScanPolicyObserver.class, MultiRowMutationEndpoint.class); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index daad7ce31886..3cdc0cb8820b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.client; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.jupiter.api.BeforeAll; @@ -24,10 +26,15 @@ @Tag(LargeTests.TAG) @Tag(ClientTests.TAG) -public class TestFromClientSide3 extends FromClientSide3TestBase { +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSide3 extends FromClientSideTest3 { + + public TestFromClientSide3(Class registryImpl, int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } @BeforeAll public static void setUpBeforeAll() throws Exception { - startCluster(); + startCluster(MultiRowMutationEndpoint.class); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3WithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3WithCoprocessor.java new file mode 100644 index 000000000000..d448172fa644 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3WithCoprocessor.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; + +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSide3WithCoprocessor extends FromClientSideTest3 { + + public TestFromClientSide3WithCoprocessor(Class registryImpl, + int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } + + @BeforeAll + public static void setUpBeforeAll() throws Exception { + startCluster(NoOpScanPolicyObserver.class, MultiRowMutationEndpoint.class); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java index e1de14d618d7..5701626cf301 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4.java @@ -17,1339 +17,30 @@ */ package org.apache.hadoop.hbase.client; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; 
-import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.EnumSet; -import java.util.HashSet; -import java.util.List; -import java.util.NavigableMap; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.ClusterMetrics.Option; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.SingleProcessHBaseCluster; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNameTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; -import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.Assume; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hbase.thirdparty.com.google.common.collect.Iterables; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; /** * Run tests that use the HBase clients; {@link Table}. Sets up the HBase mini cluster once at start * and runs through all client tests. Each creates a table named for the method and does its stuff * against that. Parameterized to run with different registry implementations. 
*/ -@Category({ LargeTests.class, ClientTests.class }) -@SuppressWarnings("deprecation") -@RunWith(Parameterized.class) -public class TestFromClientSide4 extends FromClientSideBase { - private static final Logger LOG = LoggerFactory.getLogger(TestFromClientSide4.class); - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSide4.class); - @Rule - public TableNameTestRule name = new TableNameTestRule(); - - // To keep the child classes happy. - TestFromClientSide4() { - } - - public TestFromClientSide4(Class registry, int numHedgedReqs) throws Exception { - initialize(registry, numHedgedReqs, MultiRowMutationEndpoint.class); - } - - @Parameterized.Parameters - public static Collection parameters() { - return Arrays.asList(new Object[][] { { MasterRegistry.class, 1 }, { MasterRegistry.class, 2 }, - { ZKConnectionRegistry.class, 1 } }); - } +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSide4 extends FromClientSideTest4 { - @AfterClass - public static void tearDownAfterClass() throws Exception { - afterClass(); + protected TestFromClientSide4(Class registryImpl, + int numHedgedReqs) { + super(registryImpl, numHedgedReqs); } - /** - * Test batch operations with combination of valid and invalid args - */ - @Test - public void testBatchOperationsWithErrors() throws Exception { - final TableName tableName = name.getTableName(); - try (Table foo = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 10)) { - - int NUM_OPS = 100; - - // 1.1 Put with no column families (local validation, runtime exception) - List puts = new ArrayList<>(NUM_OPS); - for (int i = 0; i != NUM_OPS; i++) { - Put put = new Put(Bytes.toBytes(i)); - puts.add(put); - } - - try { - foo.put(puts); - fail(); - } catch (IllegalArgumentException e) { - // expected - assertEquals(NUM_OPS, puts.size()); - } - - // 1.2 Put with invalid 
column family - puts.clear(); - for (int i = 0; i < NUM_OPS; i++) { - Put put = new Put(Bytes.toBytes(i)); - put.addColumn((i % 2) == 0 ? FAMILY : INVALID_FAMILY, FAMILY, Bytes.toBytes(i)); - puts.add(put); - } - - try { - foo.put(puts); - fail(); - } catch (RetriesExhaustedException e) { - // expected - assertThat(e.getCause(), instanceOf(NoSuchColumnFamilyException.class)); - } - - // 2.1 Get non-existent rows - List gets = new ArrayList<>(NUM_OPS); - for (int i = 0; i < NUM_OPS; i++) { - Get get = new Get(Bytes.toBytes(i)); - gets.add(get); - } - Result[] getsResult = foo.get(gets); - assertNotNull(getsResult); - assertEquals(NUM_OPS, getsResult.length); - for (int i = 0; i < NUM_OPS; i++) { - Result getResult = getsResult[i]; - if (i % 2 == 0) { - assertFalse(getResult.isEmpty()); - } else { - assertTrue(getResult.isEmpty()); - } - } - - // 2.2 Get with invalid column family - gets.clear(); - for (int i = 0; i < NUM_OPS; i++) { - Get get = new Get(Bytes.toBytes(i)); - get.addColumn((i % 2) == 0 ? FAMILY : INVALID_FAMILY, FAMILY); - gets.add(get); - } - try { - foo.get(gets); - fail(); - } catch (RetriesExhaustedException e) { - // expected - assertThat(e.getCause(), instanceOf(NoSuchColumnFamilyException.class)); - } - - // 3.1 Delete with invalid column family - List deletes = new ArrayList<>(NUM_OPS); - for (int i = 0; i < NUM_OPS; i++) { - Delete delete = new Delete(Bytes.toBytes(i)); - delete.addColumn((i % 2) == 0 ? 
FAMILY : INVALID_FAMILY, FAMILY); - deletes.add(delete); - } - try { - foo.delete(deletes); - fail(); - } catch (RetriesExhaustedException e) { - // expected - assertThat(e.getCause(), instanceOf(NoSuchColumnFamilyException.class)); - } - - // all valid rows should have been deleted - gets.clear(); - for (int i = 0; i < NUM_OPS; i++) { - Get get = new Get(Bytes.toBytes(i)); - gets.add(get); - } - getsResult = foo.get(gets); - assertNotNull(getsResult); - assertEquals(NUM_OPS, getsResult.length); - for (Result getResult : getsResult) { - assertTrue(getResult.isEmpty()); - } - - // 3.2 Delete non-existent rows - deletes.clear(); - for (int i = 0; i < NUM_OPS; i++) { - Delete delete = new Delete(Bytes.toBytes(i)); - deletes.add(delete); - } - foo.delete(deletes); - } - } - - // - // JIRA Testers - // - - /** - * HBASE-867 If millions of columns in a column family, hbase scanner won't come up Test will - * create numRows rows, each with numColsPerRow columns (1 version each), and attempt to scan them - * all. 
To test at scale, up numColsPerRow to the millions (have not gotten that to work running - * as junit though) - */ - @Test - public void testJiraTest867() throws Exception { - int numRows = 10; - int numColsPerRow = 2000; - - final TableName tableName = name.getTableName(); - - byte[][] ROWS = makeN(ROW, numRows); - byte[][] QUALIFIERS = makeN(QUALIFIER, numColsPerRow); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - - // Insert rows - - for (int i = 0; i < numRows; i++) { - Put put = new Put(ROWS[i]); - put.setDurability(Durability.SKIP_WAL); - for (int j = 0; j < numColsPerRow; j++) { - put.addColumn(FAMILY, QUALIFIERS[j], QUALIFIERS[j]); - } - assertEquals("Put expected to contain " + numColsPerRow + " columns but " + "only contains " - + put.size(), put.size(), numColsPerRow); - ht.put(put); - } - - // Get a row - Get get = new Get(ROWS[numRows - 1]); - Result result = ht.get(get); - assertNumKeys(result, numColsPerRow); - Cell[] keys = result.rawCells(); - for (int i = 0; i < result.size(); i++) { - assertKey(keys[i], ROWS[numRows - 1], FAMILY, QUALIFIERS[i], QUALIFIERS[i]); - } - - // Scan the rows - Scan scan = new Scan(); - try (ResultScanner scanner = ht.getScanner(scan)) { - int rowCount = 0; - while ((result = scanner.next()) != null) { - assertNumKeys(result, numColsPerRow); - Cell[] kvs = result.rawCells(); - for (int i = 0; i < numColsPerRow; i++) { - assertKey(kvs[i], ROWS[rowCount], FAMILY, QUALIFIERS[i], QUALIFIERS[i]); - } - rowCount++; - } - assertEquals( - "Expected to scan " + numRows + " rows but actually scanned " + rowCount + " rows", - rowCount, numRows); - } - - // flush and try again - - TEST_UTIL.flush(); - - // Get a row - get = new Get(ROWS[numRows - 1]); - result = ht.get(get); - assertNumKeys(result, numColsPerRow); - keys = result.rawCells(); - for (int i = 0; i < result.size(); i++) { - assertKey(keys[i], ROWS[numRows - 1], FAMILY, QUALIFIERS[i], QUALIFIERS[i]); - } - - // Scan the rows - scan = new Scan(); - try 
(ResultScanner scanner = ht.getScanner(scan)) { - int rowCount = 0; - while ((result = scanner.next()) != null) { - assertNumKeys(result, numColsPerRow); - Cell[] kvs = result.rawCells(); - for (int i = 0; i < numColsPerRow; i++) { - assertKey(kvs[i], ROWS[rowCount], FAMILY, QUALIFIERS[i], QUALIFIERS[i]); - } - rowCount++; - } - assertEquals( - "Expected to scan " + numRows + " rows but actually scanned " + rowCount + " rows", - rowCount, numRows); - } - } - } - - /** - * HBASE-861 get with timestamp will return a value if there is a version with an earlier - * timestamp - */ - @Test - public void testJiraTest861() throws Exception { - final TableName tableName = name.getTableName(); - byte[][] VALUES = makeNAscii(VALUE, 7); - long[] STAMPS = makeStamps(7); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 10)) { - - // Insert three versions - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - ht.put(put); - - // Get the middle value - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - - // Try to get one version before (expect fail) - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[1]); - - // Try to get one version after (expect fail) - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[5]); - - // Try same from storefile - TEST_UTIL.flush(); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[1]); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[5]); - - // Insert two more versions surrounding others, into memstore - put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[6], VALUES[6]); - ht.put(put); - - // Check we can get everything we should and can't get what we shouldn't - 
getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[1]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[5]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[6], VALUES[6]); - - // Try same from two storefiles - TEST_UTIL.flush(); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[1]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[5]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[6], VALUES[6]); - } - } - - /** - * HBASE-33 Add a HTable get/obtainScanner method that retrieves all versions of a particular - * column and row between two timestamps - */ - @Test - public void testJiraTest33() throws Exception { - final TableName tableName = name.getTableName(); - byte[][] VALUES = makeNAscii(VALUE, 7); - long[] STAMPS = makeStamps(7); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 10)) { - - // Insert lots versions - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); - ht.put(put); - - getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - 
getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 2); - getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 3); - - scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 2); - scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 3); - - // Try same from storefile - TEST_UTIL.flush(); - - getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 2); - getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - getVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 3); - - scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 2); - scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - scanVersionRangeAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 3); - } - } - - /** - * HBASE-1014 commit(BatchUpdate) method should return timestamp - */ - @Test - public void testJiraTest1014() throws Exception { - final TableName tableName = name.getTableName(); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 10)) { - - long manualStamp = 12345; - - // Insert lots versions - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, manualStamp, VALUE); - ht.put(put); - - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, manualStamp, VALUE); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, manualStamp - 1); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, manualStamp + 1); - } - } - - /** - * HBASE-1182 Scan for columns > some timestamp - */ - @Test - public void testJiraTest1182() throws 
Exception { - final TableName tableName = name.getTableName(); - byte[][] VALUES = makeNAscii(VALUE, 7); - long[] STAMPS = makeStamps(7); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 10)) { - - // Insert lots versions - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); - ht.put(put); - - getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); - getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - - scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); - scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - - // Try same from storefile - TEST_UTIL.flush(); - - getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); - getVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - - scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 2, 5); - scanVersionRangeAndVerifyGreaterThan(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 4, 5); - } - } - - /** - * HBASE-52 Add a means of scanning over all versions - */ - @Test - public void testJiraTest52() throws Exception { - final TableName tableName = name.getTableName(); - byte[][] VALUES = makeNAscii(VALUE, 7); - long[] 
STAMPS = makeStamps(7); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 10)) { - - // Insert lots versions - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, STAMPS[0], VALUES[0]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); - ht.put(put); - - getAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - - scanAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - - // Try same from storefile - TEST_UTIL.flush(); - - getAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - - scanAllVersionsAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS, VALUES, 0, 5); - } - } - - @Test - @SuppressWarnings("checkstyle:MethodLength") - public void testDuplicateVersions() throws Exception { - final TableName tableName = name.getTableName(); - - long[] STAMPS = makeStamps(20); - byte[][] VALUES = makeNAscii(VALUE, 20); - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 10)) { - - // Insert 4 versions of same column - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); - ht.put(put); - - // Verify we can get each one properly - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - 
scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); - - // Verify we don't accidentally get others - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]); - scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); - scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); - scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]); - - // Ensure maxVersions in query is respected - Get get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readVersions(2); - Result result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[4], STAMPS[5] }, - new byte[][] { VALUES[4], VALUES[5] }, 0, 1); - - Scan scan = new Scan().withStartRow(ROW); - scan.addColumn(FAMILY, QUALIFIER); - scan.readVersions(2); - result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[4], STAMPS[5] }, - new byte[][] { VALUES[4], VALUES[5] }, 0, 1); - - // Flush and redo - - TEST_UTIL.flush(); - - // Verify we can get each one properly - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[4]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[5], VALUES[5]); - - // Verify we don't accidentally get others - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); - 
getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]); - scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); - scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[3]); - scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[6]); - - // Ensure maxVersions in query is respected - get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readVersions(2); - result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[4], STAMPS[5] }, - new byte[][] { VALUES[4], VALUES[5] }, 0, 1); - - scan = new Scan().withStartRow(ROW); - scan.addColumn(FAMILY, QUALIFIER); - scan.readVersions(2); - result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { STAMPS[4], STAMPS[5] }, - new byte[][] { VALUES[4], VALUES[5] }, 0, 1); - - // Add some memstore and retest - - // Insert 4 more versions of same column and a dupe - put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, STAMPS[3], VALUES[3]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[4], VALUES[14]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[6], VALUES[6]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[7], VALUES[7]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[8], VALUES[8]); - ht.put(put); - - // Ensure maxVersions in query is respected - get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readVersions(7); - result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, - new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, - new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, - 0, 6); - - scan = new Scan().withStartRow(ROW); - scan.addColumn(FAMILY, QUALIFIER); - scan.readVersions(7); - result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, - new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], 
STAMPS[7], STAMPS[8] }, - new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, - 0, 6); - - get = new Get(ROW); - get.readVersions(7); - result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, - new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, - new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, - 0, 6); - - scan = new Scan().withStartRow(ROW); - scan.readVersions(7); - result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, - new long[] { STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8] }, - new byte[][] { VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], - VALUES[8] }, - 0, 6); - - // Verify we can get each one properly - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[14]); - getVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[7], VALUES[7]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[1], VALUES[1]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[2], VALUES[2]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[4], VALUES[14]); - scanVersionAndVerify(ht, ROW, FAMILY, QUALIFIER, STAMPS[7], VALUES[7]); - - // Verify we don't accidentally get others - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); - getVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[9]); - scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[0]); - scanVersionAndVerifyMissing(ht, ROW, FAMILY, QUALIFIER, STAMPS[9]); - - // Ensure maxVersions of table is respected - - TEST_UTIL.flush(); - - // Insert 4 more versions of same column and a dupe - put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, STAMPS[9], VALUES[9]); - put.addColumn(FAMILY, 
QUALIFIER, STAMPS[11], VALUES[11]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[13], VALUES[13]); - put.addColumn(FAMILY, QUALIFIER, STAMPS[15], VALUES[15]); - ht.put(put); - - get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readVersions(Integer.MAX_VALUE); - result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, - new long[] { STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8], STAMPS[9], - STAMPS[11], STAMPS[13], STAMPS[15] }, - new byte[][] { VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], VALUES[8], VALUES[9], - VALUES[11], VALUES[13], VALUES[15] }, - 0, 9); - - scan = new Scan().withStartRow(ROW); - scan.addColumn(FAMILY, QUALIFIER); - scan.readVersions(Integer.MAX_VALUE); - result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, QUALIFIER, - new long[] { STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[7], STAMPS[8], STAMPS[9], - STAMPS[11], STAMPS[13], STAMPS[15] }, - new byte[][] { VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[7], VALUES[8], VALUES[9], - VALUES[11], VALUES[13], VALUES[15] }, - 0, 9); - - // Delete a version in the memstore and a version in a storefile - Delete delete = new Delete(ROW); - delete.addColumn(FAMILY, QUALIFIER, STAMPS[11]); - delete.addColumn(FAMILY, QUALIFIER, STAMPS[7]); - ht.delete(delete); - - // Test that it's gone - get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readVersions(Integer.MAX_VALUE); - result = ht.get(get); - assertNResult(result, ROW, FAMILY, QUALIFIER, - new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[8], - STAMPS[9], STAMPS[13], STAMPS[15] }, - new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[8], - VALUES[9], VALUES[13], VALUES[15] }, - 0, 9); - - scan = new Scan().withStartRow(ROW); - scan.addColumn(FAMILY, QUALIFIER); - scan.readVersions(Integer.MAX_VALUE); - result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILY, 
QUALIFIER, - new long[] { STAMPS[1], STAMPS[2], STAMPS[3], STAMPS[4], STAMPS[5], STAMPS[6], STAMPS[8], - STAMPS[9], STAMPS[13], STAMPS[15] }, - new byte[][] { VALUES[1], VALUES[2], VALUES[3], VALUES[14], VALUES[5], VALUES[6], VALUES[8], - VALUES[9], VALUES[13], VALUES[15] }, - 0, 9); - } - } - - @Test - public void testUpdates() throws Exception { - final TableName tableName = name.getTableName(); - try (Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10)) { - - // Write a column with values at timestamp 1, 2 and 3 - byte[] row = Bytes.toBytes("row1"); - byte[] qualifier = Bytes.toBytes("myCol"); - Put put = new Put(row); - put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("AAA")); - hTable.put(put); - - put = new Put(row); - put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("BBB")); - hTable.put(put); - - put = new Put(row); - put.addColumn(FAMILY, qualifier, 3L, Bytes.toBytes("EEE")); - hTable.put(put); - - Get get = new Get(row); - get.addColumn(FAMILY, qualifier); - get.readAllVersions(); - - // Check that the column indeed has the right values at timestamps 1 and - // 2 - Result result = hTable.get(get); - NavigableMap navigableMap = result.getMap().get(FAMILY).get(qualifier); - assertEquals("AAA", Bytes.toString(navigableMap.get(1L))); - assertEquals("BBB", Bytes.toString(navigableMap.get(2L))); - - // Update the value at timestamp 1 - put = new Put(row); - put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("CCC")); - hTable.put(put); - - // Update the value at timestamp 2 - put = new Put(row); - put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("DDD")); - hTable.put(put); - - // Check that the values at timestamp 2 and 1 got updated - result = hTable.get(get); - navigableMap = result.getMap().get(FAMILY).get(qualifier); - assertEquals("CCC", Bytes.toString(navigableMap.get(1L))); - assertEquals("DDD", Bytes.toString(navigableMap.get(2L))); - } - } - - @Test - public void testUpdatesWithMajorCompaction() throws Exception { - final TableName 
tableName = name.getTableName(); - try (Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10); - Admin admin = TEST_UTIL.getAdmin()) { - - // Write a column with values at timestamp 1, 2 and 3 - byte[] row = Bytes.toBytes("row2"); - byte[] qualifier = Bytes.toBytes("myCol"); - Put put = new Put(row); - put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("AAA")); - hTable.put(put); - - put = new Put(row); - put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("BBB")); - hTable.put(put); - - put = new Put(row); - put.addColumn(FAMILY, qualifier, 3L, Bytes.toBytes("EEE")); - hTable.put(put); - - Get get = new Get(row); - get.addColumn(FAMILY, qualifier); - get.readAllVersions(); - - // Check that the column indeed has the right values at timestamps 1 and - // 2 - Result result = hTable.get(get); - NavigableMap navigableMap = result.getMap().get(FAMILY).get(qualifier); - assertEquals("AAA", Bytes.toString(navigableMap.get(1L))); - assertEquals("BBB", Bytes.toString(navigableMap.get(2L))); - - // Trigger a major compaction - admin.flush(tableName); - admin.majorCompact(tableName); - Thread.sleep(6000); - - // Update the value at timestamp 1 - put = new Put(row); - put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("CCC")); - hTable.put(put); - - // Update the value at timestamp 2 - put = new Put(row); - put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("DDD")); - hTable.put(put); - - // Trigger a major compaction - admin.flush(tableName); - admin.majorCompact(tableName); - Thread.sleep(6000); - - // Check that the values at timestamp 2 and 1 got updated - result = hTable.get(get); - navigableMap = result.getMap().get(FAMILY).get(qualifier); - assertEquals("CCC", Bytes.toString(navigableMap.get(1L))); - assertEquals("DDD", Bytes.toString(navigableMap.get(2L))); - } - } - - @Test - public void testMajorCompactionBetweenTwoUpdates() throws Exception { - final TableName tableName = name.getTableName(); - try (Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10); 
- Admin admin = TEST_UTIL.getAdmin()) { - - // Write a column with values at timestamp 1, 2 and 3 - byte[] row = Bytes.toBytes("row3"); - byte[] qualifier = Bytes.toBytes("myCol"); - Put put = new Put(row); - put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("AAA")); - hTable.put(put); - - put = new Put(row); - put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("BBB")); - hTable.put(put); - - put = new Put(row); - put.addColumn(FAMILY, qualifier, 3L, Bytes.toBytes("EEE")); - hTable.put(put); - - Get get = new Get(row); - get.addColumn(FAMILY, qualifier); - get.readAllVersions(); - - // Check that the column indeed has the right values at timestamps 1 and - // 2 - Result result = hTable.get(get); - NavigableMap navigableMap = result.getMap().get(FAMILY).get(qualifier); - assertEquals("AAA", Bytes.toString(navigableMap.get(1L))); - assertEquals("BBB", Bytes.toString(navigableMap.get(2L))); - - // Trigger a major compaction - admin.flush(tableName); - admin.majorCompact(tableName); - Thread.sleep(6000); - - // Update the value at timestamp 1 - put = new Put(row); - put.addColumn(FAMILY, qualifier, 1L, Bytes.toBytes("CCC")); - hTable.put(put); - - // Trigger a major compaction - admin.flush(tableName); - admin.majorCompact(tableName); - Thread.sleep(6000); - - // Update the value at timestamp 2 - put = new Put(row); - put.addColumn(FAMILY, qualifier, 2L, Bytes.toBytes("DDD")); - hTable.put(put); - - // Trigger a major compaction - admin.flush(tableName); - admin.majorCompact(tableName); - Thread.sleep(6000); - - // Check that the values at timestamp 2 and 1 got updated - result = hTable.get(get); - navigableMap = result.getMap().get(FAMILY).get(qualifier); - - assertEquals("CCC", Bytes.toString(navigableMap.get(1L))); - assertEquals("DDD", Bytes.toString(navigableMap.get(2L))); - } - } - - @Test - public void testGet_EmptyTable() throws IOException { - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - Get get = new Get(ROW); - 
get.addFamily(FAMILY); - Result r = table.get(get); - assertTrue(r.isEmpty()); - } - } - - @Test - public void testGet_NullQualifier() throws IOException { - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - - put = new Put(ROW); - put.addColumn(FAMILY, null, VALUE); - table.put(put); - LOG.info("Row put"); - - Get get = new Get(ROW); - get.addColumn(FAMILY, null); - Result r = table.get(get); - assertEquals(1, r.size()); - - get = new Get(ROW); - get.addFamily(FAMILY); - r = table.get(get); - assertEquals(2, r.size()); - } - } - - @Test - public void testGet_NonExistentRow() throws IOException { - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - LOG.info("Row put"); - - Get get = new Get(ROW); - get.addFamily(FAMILY); - Result r = table.get(get); - assertFalse(r.isEmpty()); - System.out.println("Row retrieved successfully"); - - byte[] missingrow = Bytes.toBytes("missingrow"); - get = new Get(missingrow); - get.addFamily(FAMILY); - r = table.get(get); - assertTrue(r.isEmpty()); - LOG.info("Row missing as it should be"); - } - } - - @Test - public void testPut() throws IOException { - final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); - final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam"); - final byte[] row1 = Bytes.toBytes("row1"); - final byte[] row2 = Bytes.toBytes("row2"); - final byte[] value = Bytes.toBytes("abcd"); - try (Table table = - TEST_UTIL.createTable(name.getTableName(), new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY })) { - Put put = new Put(row1); - put.addColumn(CONTENTS_FAMILY, null, value); - table.put(put); - - put = new Put(row2); - put.addColumn(CONTENTS_FAMILY, null, value); - - assertEquals(1, put.size()); - assertEquals(1, put.getFamilyCellMap().get(CONTENTS_FAMILY).size()); - - // KeyValue v1 expectation. 
Cast for now until we go all Cell all the time. TODO - KeyValue kv = (KeyValue) put.getFamilyCellMap().get(CONTENTS_FAMILY).get(0); - - assertTrue(Bytes.equals(CellUtil.cloneFamily(kv), CONTENTS_FAMILY)); - // will it return null or an empty byte array? - assertTrue(Bytes.equals(CellUtil.cloneQualifier(kv), new byte[0])); - - assertTrue(Bytes.equals(CellUtil.cloneValue(kv), value)); - - table.put(put); - - Scan scan = new Scan(); - scan.addColumn(CONTENTS_FAMILY, null); - try (ResultScanner scanner = table.getScanner(scan)) { - for (Result r : scanner) { - for (Cell key : r.rawCells()) { - System.out.println(Bytes.toString(r.getRow()) + ": " + key.toString()); - } - } - } - } - } - - @Test - public void testPutNoCF() throws IOException { - final byte[] BAD_FAM = Bytes.toBytes("BAD_CF"); - final byte[] VAL = Bytes.toBytes(100); - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - boolean caughtNSCFE = false; - - try { - Put p = new Put(ROW); - p.addColumn(BAD_FAM, QUALIFIER, VAL); - table.put(p); - } catch (Exception e) { - caughtNSCFE = e instanceof NoSuchColumnFamilyException; - } - assertTrue("Should throw NoSuchColumnFamilyException", caughtNSCFE); - } - } - - @Test - public void testRowsPut() throws IOException { - final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); - final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam"); - final int NB_BATCH_ROWS = 10; - final byte[] value = Bytes.toBytes("abcd"); - try (Table table = - TEST_UTIL.createTable(name.getTableName(), new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY })) { - ArrayList rowsUpdate = new ArrayList<>(); - for (int i = 0; i < NB_BATCH_ROWS; i++) { - byte[] row = Bytes.toBytes("row" + i); - Put put = new Put(row); - put.setDurability(Durability.SKIP_WAL); - put.addColumn(CONTENTS_FAMILY, null, value); - rowsUpdate.add(put); - } - table.put(rowsUpdate); - Scan scan = new Scan(); - scan.addFamily(CONTENTS_FAMILY); - try (ResultScanner scanner = table.getScanner(scan)) { - int nbRows 
= Iterables.size(scanner); - assertEquals(NB_BATCH_ROWS, nbRows); - } - } - } - - @Test - public void testRowsPutBufferedManyManyFlushes() throws IOException { - final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); - final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam"); - final byte[] value = Bytes.toBytes("abcd"); - final int NB_BATCH_ROWS = 10; - try (Table table = - TEST_UTIL.createTable(name.getTableName(), new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY })) { - ArrayList rowsUpdate = new ArrayList<>(); - for (int i = 0; i < NB_BATCH_ROWS * 10; i++) { - byte[] row = Bytes.toBytes("row" + i); - Put put = new Put(row); - put.setDurability(Durability.SKIP_WAL); - put.addColumn(CONTENTS_FAMILY, null, value); - rowsUpdate.add(put); - } - table.put(rowsUpdate); - - Scan scan = new Scan(); - scan.addFamily(CONTENTS_FAMILY); - try (ResultScanner scanner = table.getScanner(scan)) { - int nbRows = Iterables.size(scanner); - assertEquals(NB_BATCH_ROWS * 10, nbRows); - } - } - } - - @Test - public void testAddKeyValue() { - final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); - final byte[] value = Bytes.toBytes("abcd"); - final byte[] row1 = Bytes.toBytes("row1"); - final byte[] row2 = Bytes.toBytes("row2"); - byte[] qualifier = Bytes.toBytes("qf1"); - Put put = new Put(row1); - - // Adding KeyValue with the same row - KeyValue kv = new KeyValue(row1, CONTENTS_FAMILY, qualifier, value); - boolean ok = true; - try { - put.add(kv); - } catch (IOException e) { - ok = false; - } - assertTrue(ok); - - // Adding KeyValue with the different row - kv = new KeyValue(row2, CONTENTS_FAMILY, qualifier, value); - ok = false; - try { - put.add(kv); - } catch (IOException e) { - ok = true; - } - assertTrue(ok); - } - - /** - * test for HBASE-737 - */ - @Test - public void testHBase737() throws IOException { - final byte[] FAM1 = Bytes.toBytes("fam1"); - final byte[] FAM2 = Bytes.toBytes("fam2"); - // Open table - try (Table table = TEST_UTIL.createTable(name.getTableName(), new 
byte[][] { FAM1, FAM2 })) { - // Insert some values - Put put = new Put(ROW); - put.addColumn(FAM1, Bytes.toBytes("letters"), Bytes.toBytes("abcdefg")); - table.put(put); - try { - Thread.sleep(1000); - } catch (InterruptedException i) { - // ignore - } - - put = new Put(ROW); - put.addColumn(FAM1, Bytes.toBytes("numbers"), Bytes.toBytes("123456")); - table.put(put); - - try { - Thread.sleep(1000); - } catch (InterruptedException i) { - // ignore - } - - put = new Put(ROW); - put.addColumn(FAM2, Bytes.toBytes("letters"), Bytes.toBytes("hijklmnop")); - table.put(put); - - long[] times = new long[3]; - - // First scan the memstore - - Scan scan = new Scan(); - scan.addFamily(FAM1); - scan.addFamily(FAM2); - try (ResultScanner s = table.getScanner(scan)) { - int index = 0; - Result r; - while ((r = s.next()) != null) { - for (Cell key : r.rawCells()) { - times[index++] = key.getTimestamp(); - } - } - } - for (int i = 0; i < times.length - 1; i++) { - for (int j = i + 1; j < times.length; j++) { - assertTrue(times[j] > times[i]); - } - } - - // Flush data to disk and try again - TEST_UTIL.flush(); - - // Reset times - Arrays.fill(times, 0); - - try { - Thread.sleep(1000); - } catch (InterruptedException i) { - // ignore - } - scan = new Scan(); - scan.addFamily(FAM1); - scan.addFamily(FAM2); - try (ResultScanner s = table.getScanner(scan)) { - int index = 0; - Result r = null; - while ((r = s.next()) != null) { - for (Cell key : r.rawCells()) { - times[index++] = key.getTimestamp(); - } - } - for (int i = 0; i < times.length - 1; i++) { - for (int j = i + 1; j < times.length; j++) { - assertTrue(times[j] > times[i]); - } - } - } - } - } - - @Test - public void testListTables() throws IOException { - final String testTableName = name.getTableName().toString(); - final TableName tableName1 = TableName.valueOf(testTableName + "1"); - final TableName tableName2 = TableName.valueOf(testTableName + "2"); - final TableName tableName3 = TableName.valueOf(testTableName + "3"); 
- TableName[] tables = new TableName[] { tableName1, tableName2, tableName3 }; - for (TableName table : tables) { - TEST_UTIL.createTable(table, FAMILY); - } - try (Admin admin = TEST_UTIL.getAdmin()) { - List ts = admin.listTableDescriptors(); - HashSet result = new HashSet<>(ts); - int size = result.size(); - assertTrue(size >= tables.length); - for (TableName table : tables) { - boolean found = false; - for (TableDescriptor t : ts) { - if (t.getTableName().equals(table)) { - found = true; - break; - } - } - assertTrue("Not found: " + table, found); - } - } - } - - /** - * simple test that just executes parts of the client API that accept a pre-created Connection - * instance - */ - @Test - public void testUnmanagedHConnection() throws IOException { - final TableName tableName = name.getTableName(); - TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY); - try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table t = conn.getTable(tableName); Admin admin = conn.getAdmin()) { - assertTrue(admin.tableExists(tableName)); - assertTrue(t.get(new Get(ROW)).isEmpty()); - } - } - - /** - * test of that unmanaged HConnections are able to reconnect properly (see HBASE-5058) - */ - @Test - public void testUnmanagedHConnectionReconnect() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); - Class registryImpl = conf.getClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - ZKConnectionRegistry.class); - // This test does not make sense for MasterRegistry since it stops the only master in the - // cluster and starts a new master without populating the underlying config for the connection. 
- Assume.assumeFalse(registryImpl.equals(MasterRegistry.class)); - final TableName tableName = name.getTableName(); - TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY); - try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { - try (Table t = conn.getTable(tableName); Admin admin = conn.getAdmin()) { - assertTrue(admin.tableExists(tableName)); - assertTrue(t.get(new Get(ROW)).isEmpty()); - } - - // stop the master - SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - cluster.stopMaster(0, false); - cluster.waitOnMaster(0); - - // start up a new master - cluster.startMaster(); - assertTrue(cluster.waitForActiveAndReadyMaster()); - - // test that the same unmanaged connection works with a new - // Admin and can connect to the new master; - try (Admin admin = conn.getAdmin()) { - assertTrue(admin.tableExists(tableName)); - assertEquals( - admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().size(), - SLAVES); - } - } - } - - @Test - public void testMiscHTableStuff() throws IOException { - final String testTableName = name.getTableName().toString(); - final TableName tableAname = TableName.valueOf(testTableName + "A"); - final TableName tableBname = TableName.valueOf(testTableName + "B"); - final byte[] attrName = Bytes.toBytes("TESTATTR"); - final byte[] attrValue = Bytes.toBytes("somevalue"); - byte[] value = Bytes.toBytes("value"); - - try (Table a = TEST_UTIL.createTable(tableAname, HConstants.CATALOG_FAMILY); - Table b = TEST_UTIL.createTable(tableBname, HConstants.CATALOG_FAMILY)) { - Put put = new Put(ROW); - put.addColumn(HConstants.CATALOG_FAMILY, null, value); - a.put(put); - - // open a new connection to A and a connection to b - try (Table newA = TEST_UTIL.getConnection().getTable(tableAname)) { - - // copy data from A to B - Scan scan = new Scan(); - scan.addFamily(HConstants.CATALOG_FAMILY); - try (ResultScanner s = newA.getScanner(scan)) { - for (Result r : s) { - put 
= new Put(r.getRow()); - put.setDurability(Durability.SKIP_WAL); - for (Cell kv : r.rawCells()) { - put.add(kv); - } - b.put(put); - } - } - } - - // Opening a new connection to A will cause the tables to be reloaded - try (Table anotherA = TEST_UTIL.getConnection().getTable(tableAname)) { - Get get = new Get(ROW); - get.addFamily(HConstants.CATALOG_FAMILY); - anotherA.get(get); - } - - // We can still access A through newA because it has the table information - // cached. And if it needs to recalibrate, that will cause the information - // to be reloaded. - - // Test user metadata - Admin admin = TEST_UTIL.getAdmin(); - // make a modifiable descriptor - TableDescriptor desc = a.getDescriptor(); - // offline the table - admin.disableTable(tableAname); - // add a user attribute to HTD - TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(desc).setValue(attrName, attrValue); - // add a user attribute to HCD - for (ColumnFamilyDescriptor c : desc.getColumnFamilies()) { - builder.modifyColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(c).setValue(attrName, attrValue).build()); - } - // update metadata for all regions of this table - admin.modifyTable(builder.build()); - // enable the table - admin.enableTable(tableAname); - - // Test that attribute changes were applied - desc = a.getDescriptor(); - assertEquals("wrong table descriptor returned", desc.getTableName(), tableAname); - // check HTD attribute - value = desc.getValue(attrName); - assertNotNull("missing HTD attribute value", value); - assertFalse("HTD attribute value is incorrect", Bytes.compareTo(value, attrValue) != 0); - // check HCD attribute - for (ColumnFamilyDescriptor c : desc.getColumnFamilies()) { - value = c.getValue(attrName); - assertNotNull("missing HCD attribute value", value); - assertFalse("HCD attribute value is incorrect", Bytes.compareTo(value, attrValue) != 0); - } - } + @BeforeAll + public static void setUpBeforeClass() throws Exception { + 
initialize(MultiRowMutationEndpoint.class); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4WithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4WithCoprocessor.java new file mode 100644 index 000000000000..689c7046b852 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide4WithCoprocessor.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; + +/** + * Run tests that use the HBase clients; {@link Table}. Sets up the HBase mini cluster once at start + * and runs through all client tests. Each creates a table named for the method and does its stuff + * against that. 
Parameterized to run with different registry implementations. + */ +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSide4WithCoprocessor extends FromClientSideTest4 { + + protected TestFromClientSide4WithCoprocessor(Class registryImpl, + int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } + + @BeforeAll + public static void setUpBeforeClass() throws Exception { + initialize(NoOpScanPolicyObserver.class, MultiRowMutationEndpoint.class); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java index 8f820158e460..4efedafbf2a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java @@ -17,2890 +17,30 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicReference; -import org.apache.commons.lang3.ArrayUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.CellUtil; -import 
org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNameTestRule; -import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.client.Scan.ReadType; -import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; -import org.apache.hadoop.hbase.filter.BinaryComparator; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.FilterList; -import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; -import org.apache.hadoop.hbase.filter.InclusiveStopFilter; -import org.apache.hadoop.hbase.filter.KeyOnlyFilter; -import org.apache.hadoop.hbase.filter.QualifierFilter; -import org.apache.hadoop.hbase.filter.RegexStringComparator; -import org.apache.hadoop.hbase.filter.RowFilter; -import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; -import org.apache.hadoop.hbase.filter.SubstringComparator; -import org.apache.hadoop.hbase.filter.ValueFilter; -import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; +import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; import 
org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSUtils; -import org.junit.AfterClass; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; /** * Run tests that use the HBase clients; {@link Table}. Sets up the HBase mini cluster once at start * and runs through all client tests. Each creates a table named for the method and does its stuff * against that. Parameterized to run with different registry implementations. 
*/ -@Category({ LargeTests.class, ClientTests.class }) -@SuppressWarnings("deprecation") -@RunWith(Parameterized.class) -public class TestFromClientSide5 extends FromClientSideBase { - private static final Logger LOG = LoggerFactory.getLogger(TestFromClientSide5.class); - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSide5.class); - - @Rule - public TableNameTestRule name = new TableNameTestRule(); - - // To keep the child classes happy. - TestFromClientSide5() { - } - - public TestFromClientSide5(Class registry, int numHedgedReqs) - throws Exception { - initialize(registry, numHedgedReqs, MultiRowMutationEndpoint.class); - } - - @Parameters(name = "{index}: registry={0}, numHedgedReqs={1}") - public static List parameters() { - return Arrays.asList(new Object[] { MasterRegistry.class, 1 }, - new Object[] { MasterRegistry.class, 2 }, new Object[] { ZKConnectionRegistry.class, 1 }); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - afterClass(); - } - - @Test - public void testGetClosestRowBefore() throws IOException, InterruptedException { - final TableName tableName = name.getTableName(); - final byte[] firstRow = Bytes.toBytes("row111"); - final byte[] secondRow = Bytes.toBytes("row222"); - final byte[] thirdRow = Bytes.toBytes("row333"); - final byte[] forthRow = Bytes.toBytes("row444"); - final byte[] beforeFirstRow = Bytes.toBytes("row"); - final byte[] beforeSecondRow = Bytes.toBytes("row22"); - final byte[] beforeThirdRow = Bytes.toBytes("row33"); - final byte[] beforeForthRow = Bytes.toBytes("row44"); - - try ( - Table table = TEST_UTIL.createTable(tableName, - new byte[][] { HConstants.CATALOG_FAMILY, Bytes.toBytes("info2") }, 1, 1024); - RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - - // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow - // in Store.rowAtOrBeforeFromStoreFile - 
String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); - Put put1 = new Put(firstRow); - Put put2 = new Put(secondRow); - Put put3 = new Put(thirdRow); - Put put4 = new Put(forthRow); - byte[] one = new byte[] { 1 }; - byte[] two = new byte[] { 2 }; - byte[] three = new byte[] { 3 }; - byte[] four = new byte[] { 4 }; - - put1.addColumn(HConstants.CATALOG_FAMILY, null, one); - put2.addColumn(HConstants.CATALOG_FAMILY, null, two); - put3.addColumn(HConstants.CATALOG_FAMILY, null, three); - put4.addColumn(HConstants.CATALOG_FAMILY, null, four); - table.put(put1); - table.put(put2); - table.put(put3); - table.put(put4); - region.flush(true); - - Result result; - - // Test before first that null is returned - result = getReverseScanResult(table, beforeFirstRow); - assertNull(result); - - // Test at first that first is returned - result = getReverseScanResult(table, firstRow); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), firstRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); - - // Test in between first and second that first is returned - result = getReverseScanResult(table, beforeSecondRow); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), firstRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); - - // Test at second make sure second is returned - result = getReverseScanResult(table, secondRow); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), secondRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); - - // Test in second and third, make sure second is returned - result = getReverseScanResult(table, beforeThirdRow); - 
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), secondRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); - - // Test at third make sure third is returned - result = getReverseScanResult(table, thirdRow); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), thirdRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); - - // Test in third and forth, make sure third is returned - result = getReverseScanResult(table, beforeForthRow); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), thirdRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); - - // Test at forth make sure forth is returned - result = getReverseScanResult(table, forthRow); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), forthRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); - - // Test after forth make sure forth is returned - result = getReverseScanResult(table, Bytes.add(forthRow, one)); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), forthRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); - } - } - - private Result getReverseScanResult(Table table, byte[] row) throws IOException { - Scan scan = new Scan().withStartRow(row); - scan.setReadType(ReadType.PREAD); - scan.setReversed(true); - scan.setCaching(1); - scan.addFamily(HConstants.CATALOG_FAMILY); - try (ResultScanner scanner = table.getScanner(scan)) { - return scanner.next(); - } - } - - /** - * For HBASE-2156 - */ - @Test - public void testScanVariableReuse() { - Scan scan = new Scan(); - scan.addFamily(FAMILY); - scan.addColumn(FAMILY, ROW); - 
- assertEquals(1, scan.getFamilyMap().get(FAMILY).size()); - - scan = new Scan(); - scan.addFamily(FAMILY); - - assertNull(scan.getFamilyMap().get(FAMILY)); - assertTrue(scan.getFamilyMap().containsKey(FAMILY)); - } - - @Test - public void testMultiRowMutation() throws Exception { - LOG.info("Starting testMultiRowMutation"); - final TableName tableName = name.getTableName(); - final byte[] ROW1 = Bytes.toBytes("testRow1"); - final byte[] ROW2 = Bytes.toBytes("testRow2"); - final byte[] ROW3 = Bytes.toBytes("testRow3"); - - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - // Add initial data - t.batch(Arrays.asList(new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE), - new Put(ROW2).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(1L)), - new Put(ROW3).addColumn(FAMILY, QUALIFIER, VALUE)), new Object[3]); - - // Execute MultiRowMutation - Put put = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); - MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put); - - Delete delete = new Delete(ROW1); - MutationProto m2 = ProtobufUtil.toMutation(MutationType.DELETE, delete); - - Increment increment = new Increment(ROW2).addColumn(FAMILY, QUALIFIER, 1L); - MutationProto m3 = ProtobufUtil.toMutation(MutationType.INCREMENT, increment); - - Append append = new Append(ROW3).addColumn(FAMILY, QUALIFIER, VALUE); - MutationProto m4 = ProtobufUtil.toMutation(MutationType.APPEND, append); - - MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); - mrmBuilder.addMutationRequest(m1); - mrmBuilder.addMutationRequest(m2); - mrmBuilder.addMutationRequest(m3); - mrmBuilder.addMutationRequest(m4); - - CoprocessorRpcChannel channel = t.coprocessorService(ROW); - MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); - MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); - - // Assert - assertTrue(response.getProcessed()); - - Result r = t.get(new Get(ROW)); - 
assertEquals(Bytes.toString(VALUE), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); - - r = t.get(new Get(ROW1)); - assertTrue(r.isEmpty()); - - r = t.get(new Get(ROW2)); - assertEquals(2L, Bytes.toLong(r.getValue(FAMILY, QUALIFIER))); - - r = t.get(new Get(ROW3)); - assertEquals(Bytes.toString(VALUE) + Bytes.toString(VALUE), - Bytes.toString(r.getValue(FAMILY, QUALIFIER))); - } - } - - @Test - public void testMultiRowMutationWithSingleConditionWhenConditionMatches() throws Exception { - final TableName tableName = name.getTableName(); - final byte[] ROW1 = Bytes.toBytes("testRow1"); - final byte[] ROW2 = Bytes.toBytes("testRow2"); - final byte[] VALUE1 = Bytes.toBytes("testValue1"); - final byte[] VALUE2 = Bytes.toBytes("testValue2"); - - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - // Add initial data - t.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2)); - - // Execute MultiRowMutation with conditions - Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); - MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); - Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); - MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); - Delete delete = new Delete(ROW2); - MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); - - MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); - mrmBuilder.addMutationRequest(m1); - mrmBuilder.addMutationRequest(m2); - mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition( - ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2, null)); - - CoprocessorRpcChannel channel = t.coprocessorService(ROW); - MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); - MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); - - // Assert - assertTrue(response.getProcessed()); - - Result r = t.get(new Get(ROW)); - assertEquals(Bytes.toString(VALUE), 
Bytes.toString(r.getValue(FAMILY, QUALIFIER))); - - r = t.get(new Get(ROW1)); - assertEquals(Bytes.toString(VALUE1), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); - - r = t.get(new Get(ROW2)); - assertTrue(r.isEmpty()); - } - } - - @Test - public void testMultiRowMutationWithSingleConditionWhenConditionNotMatch() throws Exception { - final TableName tableName = name.getTableName(); - final byte[] ROW1 = Bytes.toBytes("testRow1"); - final byte[] ROW2 = Bytes.toBytes("testRow2"); - final byte[] VALUE1 = Bytes.toBytes("testValue1"); - final byte[] VALUE2 = Bytes.toBytes("testValue2"); - - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - // Add initial data - t.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2)); - - // Execute MultiRowMutation with conditions - Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); - MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); - Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); - MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); - Delete delete = new Delete(ROW2); - MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); - - MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); - mrmBuilder.addMutationRequest(m1); - mrmBuilder.addMutationRequest(m2); - mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition( - ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE1, null)); - - CoprocessorRpcChannel channel = t.coprocessorService(ROW); - MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); - MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); - - // Assert - assertFalse(response.getProcessed()); - - Result r = t.get(new Get(ROW)); - assertTrue(r.isEmpty()); - - r = t.get(new Get(ROW1)); - assertTrue(r.isEmpty()); - - r = t.get(new Get(ROW2)); - assertEquals(Bytes.toString(VALUE2), Bytes.toString(r.getValue(FAMILY, 
QUALIFIER))); - } - } - - @Test - public void testMultiRowMutationWithMultipleConditionsWhenConditionsMatch() throws Exception { - final TableName tableName = name.getTableName(); - final byte[] ROW1 = Bytes.toBytes("testRow1"); - final byte[] ROW2 = Bytes.toBytes("testRow2"); - final byte[] VALUE1 = Bytes.toBytes("testValue1"); - final byte[] VALUE2 = Bytes.toBytes("testValue2"); - - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - // Add initial data - t.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2)); - - // Execute MultiRowMutation with conditions - Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); - MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); - Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); - MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); - Delete delete = new Delete(ROW2); - MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); - - MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); - mrmBuilder.addMutationRequest(m1); - mrmBuilder.addMutationRequest(m2); - mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition( - ProtobufUtil.toCondition(ROW, FAMILY, QUALIFIER, CompareOperator.EQUAL, null, null)); - mrmBuilder.addCondition( - ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2, null)); - - CoprocessorRpcChannel channel = t.coprocessorService(ROW); - MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); - MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); - - // Assert - assertTrue(response.getProcessed()); - - Result r = t.get(new Get(ROW)); - assertEquals(Bytes.toString(VALUE), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); - - r = t.get(new Get(ROW1)); - assertEquals(Bytes.toString(VALUE1), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); - - r = t.get(new Get(ROW2)); - assertTrue(r.isEmpty()); - } - } - - @Test - public 
void testMultiRowMutationWithMultipleConditionsWhenConditionsNotMatch() throws Exception { - final TableName tableName = name.getTableName(); - final byte[] ROW1 = Bytes.toBytes("testRow1"); - final byte[] ROW2 = Bytes.toBytes("testRow2"); - final byte[] VALUE1 = Bytes.toBytes("testValue1"); - final byte[] VALUE2 = Bytes.toBytes("testValue2"); - - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - // Add initial data - t.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2)); - - // Execute MultiRowMutation with conditions - Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); - MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); - Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); - MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); - Delete delete = new Delete(ROW2); - MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); - - MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); - mrmBuilder.addMutationRequest(m1); - mrmBuilder.addMutationRequest(m2); - mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition( - ProtobufUtil.toCondition(ROW1, FAMILY, QUALIFIER, CompareOperator.EQUAL, null, null)); - mrmBuilder.addCondition( - ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE1, null)); - - CoprocessorRpcChannel channel = t.coprocessorService(ROW); - MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); - MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); - - // Assert - assertFalse(response.getProcessed()); - - Result r = t.get(new Get(ROW)); - assertTrue(r.isEmpty()); - - r = t.get(new Get(ROW1)); - assertTrue(r.isEmpty()); - - r = t.get(new Get(ROW2)); - assertEquals(Bytes.toString(VALUE2), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); - } - } - - @Test - public void testMultiRowMutationWithFilterConditionWhenConditionMatches() throws Exception { - final 
TableName tableName = name.getTableName(); - final byte[] ROW1 = Bytes.toBytes("testRow1"); - final byte[] ROW2 = Bytes.toBytes("testRow2"); - final byte[] QUALIFIER2 = Bytes.toBytes("testQualifier2"); - final byte[] VALUE1 = Bytes.toBytes("testValue1"); - final byte[] VALUE2 = Bytes.toBytes("testValue2"); - final byte[] VALUE3 = Bytes.toBytes("testValue3"); - - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - // Add initial data - t.put( - new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2).addColumn(FAMILY, QUALIFIER2, VALUE3)); - - // Execute MultiRowMutation with conditions - Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); - MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); - Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); - MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); - Delete delete = new Delete(ROW2); - MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); - - MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); - mrmBuilder.addMutationRequest(m1); - mrmBuilder.addMutationRequest(m2); - mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, - new FilterList( - new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2), - new SingleColumnValueFilter(FAMILY, QUALIFIER2, CompareOperator.EQUAL, VALUE3)), - null)); - - CoprocessorRpcChannel channel = t.coprocessorService(ROW); - MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); - MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); - - // Assert - assertTrue(response.getProcessed()); - - Result r = t.get(new Get(ROW)); - assertEquals(Bytes.toString(VALUE), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); - - r = t.get(new Get(ROW1)); - assertEquals(Bytes.toString(VALUE1), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); - - r = t.get(new Get(ROW2)); - 
assertTrue(r.isEmpty()); - } - } - - @Test - public void testMultiRowMutationWithFilterConditionWhenConditionNotMatch() throws Exception { - final TableName tableName = name.getTableName(); - final byte[] ROW1 = Bytes.toBytes("testRow1"); - final byte[] ROW2 = Bytes.toBytes("testRow2"); - final byte[] QUALIFIER2 = Bytes.toBytes("testQualifier2"); - final byte[] VALUE1 = Bytes.toBytes("testValue1"); - final byte[] VALUE2 = Bytes.toBytes("testValue2"); - final byte[] VALUE3 = Bytes.toBytes("testValue3"); - - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - // Add initial data - t.put( - new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2).addColumn(FAMILY, QUALIFIER2, VALUE3)); - - // Execute MultiRowMutation with conditions - Put put1 = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE); - MutationProto m1 = ProtobufUtil.toMutation(MutationType.PUT, put1); - Put put2 = new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1); - MutationProto m2 = ProtobufUtil.toMutation(MutationType.PUT, put2); - Delete delete = new Delete(ROW2); - MutationProto m3 = ProtobufUtil.toMutation(MutationType.DELETE, delete); - - MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); - mrmBuilder.addMutationRequest(m1); - mrmBuilder.addMutationRequest(m2); - mrmBuilder.addMutationRequest(m3); - mrmBuilder.addCondition(ProtobufUtil.toCondition(ROW2, - new FilterList( - new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2), - new SingleColumnValueFilter(FAMILY, QUALIFIER2, CompareOperator.EQUAL, VALUE2)), - null)); +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSide5 extends FromClientSideTest5 { - CoprocessorRpcChannel channel = t.coprocessorService(ROW); - MultiRowMutationService.BlockingInterface service = - MultiRowMutationService.newBlockingStub(channel); - MutateRowsResponse response = service.mutateRows(null, mrmBuilder.build()); 
- - // Assert - assertFalse(response.getProcessed()); - - Result r = t.get(new Get(ROW)); - assertTrue(r.isEmpty()); - - r = t.get(new Get(ROW1)); - assertTrue(r.isEmpty()); - - r = t.get(new Get(ROW2)); - assertEquals(Bytes.toString(VALUE2), Bytes.toString(r.getValue(FAMILY, QUALIFIER))); - } - } - - @Test - public void testRowMutations() throws Exception { - LOG.info("Starting testRowMutations"); - final TableName tableName = name.getTableName(); - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - byte[][] QUALIFIERS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), - Bytes.toBytes("c"), Bytes.toBytes("d") }; - - // Test for Put operations - RowMutations arm = new RowMutations(ROW); - Put p = new Put(ROW); - p.addColumn(FAMILY, QUALIFIERS[0], VALUE); - arm.add(p); - Result r = t.mutateRow(arm); - assertTrue(r.getExists()); - assertTrue(r.isEmpty()); - - Get g = new Get(ROW); - r = t.get(g); - assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0]))); - - // Test for Put and Delete operations - arm = new RowMutations(ROW); - p = new Put(ROW); - p.addColumn(FAMILY, QUALIFIERS[1], VALUE); - arm.add(p); - Delete d = new Delete(ROW); - d.addColumns(FAMILY, QUALIFIERS[0]); - arm.add(d); - // TODO: Trying mutateRow again. The batch was failing with a one try only. 
- r = t.mutateRow(arm); - assertTrue(r.getExists()); - assertTrue(r.isEmpty()); - - r = t.get(g); - assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1]))); - assertNull(r.getValue(FAMILY, QUALIFIERS[0])); - - // Test for Increment and Append operations - arm = new RowMutations(ROW); - arm.add(Arrays.asList(new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE), - new Delete(ROW).addColumns(FAMILY, QUALIFIERS[1]), - new Increment(ROW).addColumn(FAMILY, QUALIFIERS[2], 5L), - new Append(ROW).addColumn(FAMILY, QUALIFIERS[3], Bytes.toBytes("abc")))); - r = t.mutateRow(arm); - assertTrue(r.getExists()); - assertEquals(5L, Bytes.toLong(r.getValue(FAMILY, QUALIFIERS[2]))); - assertEquals("abc", Bytes.toString(r.getValue(FAMILY, QUALIFIERS[3]))); - - g = new Get(ROW); - r = t.get(g); - assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0]))); - assertNull(r.getValue(FAMILY, QUALIFIERS[1])); - assertEquals(5L, Bytes.toLong(r.getValue(FAMILY, QUALIFIERS[2]))); - assertEquals("abc", Bytes.toString(r.getValue(FAMILY, QUALIFIERS[3]))); - - // Test that we get a region level exception - try { - arm = new RowMutations(ROW); - p = new Put(ROW); - p.addColumn(new byte[] { 'b', 'o', 'g', 'u', 's' }, QUALIFIERS[0], VALUE); - arm.add(p); - t.mutateRow(arm); - fail("Expected NoSuchColumnFamilyException"); - } catch (NoSuchColumnFamilyException e) { - return; - } catch (RetriesExhaustedWithDetailsException e) { - for (Throwable rootCause : e.getCauses()) { - if (rootCause instanceof NoSuchColumnFamilyException) { - return; - } - } - throw e; - } - } - } - - @Test - public void testBatchAppendWithReturnResultFalse() throws Exception { - LOG.info("Starting testBatchAppendWithReturnResultFalse"); - final TableName tableName = name.getTableName(); - try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { - Append append1 = new Append(Bytes.toBytes("row1")); - append1.setReturnResults(false); - append1.addColumn(FAMILY, Bytes.toBytes("f1"), 
Bytes.toBytes("value1")); - Append append2 = new Append(Bytes.toBytes("row1")); - append2.setReturnResults(false); - append2.addColumn(FAMILY, Bytes.toBytes("f1"), Bytes.toBytes("value2")); - List appends = new ArrayList<>(); - appends.add(append1); - appends.add(append2); - Object[] results = new Object[2]; - table.batch(appends, results); - assertEquals(2, results.length); - for (Object r : results) { - Result result = (Result) r; - assertTrue(result.isEmpty()); - } - } - } - - @Test - public void testAppend() throws Exception { - LOG.info("Starting testAppend"); - final TableName tableName = name.getTableName(); - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - byte[] v1 = Bytes.toBytes("42"); - byte[] v2 = Bytes.toBytes("23"); - byte[][] QUALIFIERS = - new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("a"), Bytes.toBytes("c") }; - Append a = new Append(ROW); - a.addColumn(FAMILY, QUALIFIERS[0], v1); - a.addColumn(FAMILY, QUALIFIERS[1], v2); - a.setReturnResults(false); - assertEmptyResult(t.append(a)); - - a = new Append(ROW); - a.addColumn(FAMILY, QUALIFIERS[0], v2); - a.addColumn(FAMILY, QUALIFIERS[1], v1); - a.addColumn(FAMILY, QUALIFIERS[2], v2); - Result r = t.append(a); - assertEquals(0, Bytes.compareTo(Bytes.add(v1, v2), r.getValue(FAMILY, QUALIFIERS[0]))); - assertEquals(0, Bytes.compareTo(Bytes.add(v2, v1), r.getValue(FAMILY, QUALIFIERS[1]))); - // QUALIFIERS[2] previously not exist, verify both value and timestamp are correct - assertEquals(0, Bytes.compareTo(v2, r.getValue(FAMILY, QUALIFIERS[2]))); - assertEquals(r.getColumnLatestCell(FAMILY, QUALIFIERS[0]).getTimestamp(), - r.getColumnLatestCell(FAMILY, QUALIFIERS[2]).getTimestamp()); - } - } - - private List doAppend(final boolean walUsed) throws IOException { - LOG.info("Starting testAppend, walUsed is " + walUsed); - final TableName TABLENAME = - TableName.valueOf(walUsed ? 
"testAppendWithWAL" : "testAppendWithoutWAL"); - try (Table t = TEST_UTIL.createTable(TABLENAME, FAMILY)) { - final byte[] row1 = Bytes.toBytes("c"); - final byte[] row2 = Bytes.toBytes("b"); - final byte[] row3 = Bytes.toBytes("a"); - final byte[] qual = Bytes.toBytes("qual"); - Put put_0 = new Put(row2); - put_0.addColumn(FAMILY, qual, Bytes.toBytes("put")); - Put put_1 = new Put(row3); - put_1.addColumn(FAMILY, qual, Bytes.toBytes("put")); - Append append_0 = new Append(row1); - append_0.addColumn(FAMILY, qual, Bytes.toBytes("i")); - Append append_1 = new Append(row1); - append_1.addColumn(FAMILY, qual, Bytes.toBytes("k")); - Append append_2 = new Append(row1); - append_2.addColumn(FAMILY, qual, Bytes.toBytes("e")); - if (!walUsed) { - append_2.setDurability(Durability.SKIP_WAL); - } - Append append_3 = new Append(row1); - append_3.addColumn(FAMILY, qual, Bytes.toBytes("a")); - Scan s = new Scan(); - s.setCaching(1); - t.append(append_0); - t.put(put_0); - t.put(put_1); - List results = new LinkedList<>(); - try (ResultScanner scanner = t.getScanner(s)) { - // get one row(should be row3) from the scanner to make sure that we have send a request to - // region server, which means we have already set the read point, so later we should not see - // the new appended values. 
- Result r = scanner.next(); - assertNotNull(r); - results.add(r); - t.append(append_1); - t.append(append_2); - t.append(append_3); - for (;;) { - r = scanner.next(); - if (r == null) { - break; - } - results.add(r); - } - } - TEST_UTIL.deleteTable(TABLENAME); - return results; - } - } - - @Test - public void testAppendWithoutWAL() throws Exception { - List resultsWithWal = doAppend(true); - List resultsWithoutWal = doAppend(false); - assertEquals(resultsWithWal.size(), resultsWithoutWal.size()); - for (int i = 0; i < resultsWithWal.size(); ++i) { - Result resultWithWal = resultsWithWal.get(i); - Result resultWithoutWal = resultsWithoutWal.get(i); - assertEquals(resultWithWal.rawCells().length, resultWithoutWal.rawCells().length); - for (int j = 0; j < resultWithWal.rawCells().length; ++j) { - Cell cellWithWal = resultWithWal.rawCells()[j]; - Cell cellWithoutWal = resultWithoutWal.rawCells()[j]; - assertArrayEquals(CellUtil.cloneRow(cellWithWal), CellUtil.cloneRow(cellWithoutWal)); - assertArrayEquals(CellUtil.cloneFamily(cellWithWal), CellUtil.cloneFamily(cellWithoutWal)); - assertArrayEquals(CellUtil.cloneQualifier(cellWithWal), - CellUtil.cloneQualifier(cellWithoutWal)); - assertArrayEquals(CellUtil.cloneValue(cellWithWal), CellUtil.cloneValue(cellWithoutWal)); - } - } - } - - @Test - public void testClientPoolRoundRobin() throws IOException { - final TableName tableName = name.getTableName(); - - int poolSize = 3; - int numVersions = poolSize * 2; - Configuration conf = TEST_UTIL.getConfiguration(); - conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "round-robin"); - conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize); - - try ( - Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, Integer.MAX_VALUE)) { - - final long ts = EnvironmentEdgeManager.currentTime(); - Get get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readAllVersions(); - - for (int versions = 1; versions <= numVersions; versions++) { - Put put = new 
Put(ROW); - put.addColumn(FAMILY, QUALIFIER, ts + versions, VALUE); - table.put(put); - - Result result = table.get(get); - NavigableMap navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); - - assertEquals("The number of versions of '" + Bytes.toString(FAMILY) + ":" - + Bytes.toString(QUALIFIER) + " did not match", versions, navigableMap.size()); - for (Map.Entry entry : navigableMap.entrySet()) { - assertTrue("The value at time " + entry.getKey() + " did not match what was put", - Bytes.equals(VALUE, entry.getValue())); - } - } - } - } - - @Ignore("Flakey: HBASE-8989") - @Test - public void testClientPoolThreadLocal() throws IOException { - final TableName tableName = name.getTableName(); - - int poolSize = Integer.MAX_VALUE; - int numVersions = 3; - Configuration conf = TEST_UTIL.getConfiguration(); - conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "thread-local"); - conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize); - - try (final Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 3)) { - - final long ts = EnvironmentEdgeManager.currentTime(); - final Get get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readAllVersions(); - - for (int versions = 1; versions <= numVersions; versions++) { - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, ts + versions, VALUE); - table.put(put); - - Result result = table.get(get); - NavigableMap navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); - - assertEquals("The number of versions of '" + Bytes.toString(FAMILY) + ":" - + Bytes.toString(QUALIFIER) + " did not match", versions, navigableMap.size()); - for (Map.Entry entry : navigableMap.entrySet()) { - assertTrue("The value at time " + entry.getKey() + " did not match what was put", - Bytes.equals(VALUE, entry.getValue())); - } - } - - final Object waitLock = new Object(); - ExecutorService executorService = Executors.newFixedThreadPool(numVersions); - final AtomicReference error = new 
AtomicReference<>(null); - for (int versions = numVersions; versions < numVersions * 2; versions++) { - final int versionsCopy = versions; - executorService.submit((Callable) () -> { - try { - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, ts + versionsCopy, VALUE); - table.put(put); - - Result result = table.get(get); - NavigableMap navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); - - assertEquals( - "The number of versions of '" + Bytes.toString(FAMILY) + ":" - + Bytes.toString(QUALIFIER) + " did not match " + versionsCopy, - versionsCopy, navigableMap.size()); - for (Map.Entry entry : navigableMap.entrySet()) { - assertTrue("The value at time " + entry.getKey() + " did not match what was put", - Bytes.equals(VALUE, entry.getValue())); - } - synchronized (waitLock) { - waitLock.wait(); - } - } catch (Exception ignored) { - } catch (AssertionError e) { - // the error happens in a thread, it won't fail the test, - // need to pass it to the caller for proper handling. - error.set(e); - LOG.error(e.toString(), e); - } - - return null; - }); - } - synchronized (waitLock) { - waitLock.notifyAll(); - } - executorService.shutdownNow(); - assertNull(error.get()); - } - } - - @Test - public void testCheckAndPut() throws IOException { - final byte[] anotherrow = Bytes.toBytes("anotherrow"); - final byte[] value2 = Bytes.toBytes("abcd"); - - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - Put put1 = new Put(ROW); - put1.addColumn(FAMILY, QUALIFIER, VALUE); - - // row doesn't exist, so using non-null value should be considered "not match". - boolean ok = - table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenPut(put1); - assertFalse(ok); - - // row doesn't exist, so using "ifNotExists" should be considered "match". - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put1); - assertTrue(ok); - - // row now exists, so using "ifNotExists" should be considered "not match". 
- ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put1); - assertFalse(ok); - - Put put2 = new Put(ROW); - put2.addColumn(FAMILY, QUALIFIER, value2); - - // row now exists, use the matching value to check - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenPut(put2); - assertTrue(ok); - - Put put3 = new Put(anotherrow); - put3.addColumn(FAMILY, QUALIFIER, VALUE); - - // try to do CheckAndPut on different rows - try { - table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(value2).thenPut(put3); - fail("trying to check and modify different rows should have failed."); - } catch (Exception ignored) { - } - } - } - - @Test - public void testCheckAndMutateWithTimeRange() throws IOException { - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - final long ts = EnvironmentEdgeManager.currentTime() / 2; - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, ts, VALUE); - - boolean ok = - table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put); - assertTrue(ok); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.at(ts + 10000)).ifEquals(VALUE).thenPut(put); - assertFalse(ok); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.from(ts + 10000)).ifEquals(VALUE).thenPut(put); - assertFalse(ok); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.between(ts + 10000, ts + 20000)).ifEquals(VALUE).thenPut(put); - assertFalse(ok); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.until(ts)) - .ifEquals(VALUE).thenPut(put); - assertFalse(ok); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) - .ifEquals(VALUE).thenPut(put); - assertTrue(ok); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.from(ts)) - .ifEquals(VALUE).thenPut(put); - 
assertTrue(ok); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.between(ts, ts + 20000)).ifEquals(VALUE).thenPut(put); - assertTrue(ok); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.until(ts + 10000)).ifEquals(VALUE).thenPut(put); - assertTrue(ok); - - RowMutations rm = new RowMutations(ROW).add((Mutation) put); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.at(ts + 10000)).ifEquals(VALUE).thenMutate(rm); - assertFalse(ok); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) - .ifEquals(VALUE).thenMutate(rm); - assertTrue(ok); - - Delete delete = new Delete(ROW).addColumn(FAMILY, QUALIFIER); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .timeRange(TimeRange.at(ts + 10000)).ifEquals(VALUE).thenDelete(delete); - assertFalse(ok); - - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).timeRange(TimeRange.at(ts)) - .ifEquals(VALUE).thenDelete(delete); - assertTrue(ok); - } - } - - @Test - public void testCheckAndPutWithCompareOp() throws IOException { - final byte[] value1 = Bytes.toBytes("aaaa"); - final byte[] value2 = Bytes.toBytes("bbbb"); - final byte[] value3 = Bytes.toBytes("cccc"); - final byte[] value4 = Bytes.toBytes("dddd"); - - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - - Put put2 = new Put(ROW); - put2.addColumn(FAMILY, QUALIFIER, value2); - - Put put3 = new Put(ROW); - put3.addColumn(FAMILY, QUALIFIER, value3); - - // row doesn't exist, so using "ifNotExists" should be considered "match". 
- boolean ok = - table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put2); - assertTrue(ok); - - // cell = "bbbb", using "aaaa" to compare only LESS/LESS_OR_EQUAL/NOT_EQUAL - // turns out "match" - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value1).thenPut(put2); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value1).thenPut(put2); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value1).thenPut(put2); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value1).thenPut(put2); - assertTrue(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value1).thenPut(put2); - assertTrue(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value1).thenPut(put3); - assertTrue(ok); - - // cell = "cccc", using "dddd" to compare only LARGER/LARGER_OR_EQUAL/NOT_EQUAL - // turns out "match" - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value4).thenPut(put3); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value4).thenPut(put3); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value4).thenPut(put3); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value4).thenPut(put3); - assertTrue(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value4).thenPut(put3); - assertTrue(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - 
.ifMatches(CompareOperator.NOT_EQUAL, value4).thenPut(put2); - assertTrue(ok); - - // cell = "bbbb", using "bbbb" to compare only GREATER_OR_EQUAL/LESS_OR_EQUAL/EQUAL - // turns out "match" - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value2).thenPut(put2); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value2).thenPut(put2); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value2).thenPut(put2); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value2).thenPut(put2); - assertTrue(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value2).thenPut(put2); - assertTrue(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value2).thenPut(put3); - assertTrue(ok); - } + public TestFromClientSide5(Class registryImpl, int numHedgedReqs) { + super(registryImpl, numHedgedReqs); } - @Test - public void testCheckAndDelete() throws IOException { - final byte[] value1 = Bytes.toBytes("aaaa"); - - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, value1); - table.put(put); - - Delete delete = new Delete(ROW); - delete.addColumns(FAMILY, QUALIFIER); - - boolean ok = - table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(value1).thenDelete(delete); - assertTrue(ok); - } - } - - @Test - public void testCheckAndDeleteWithCompareOp() throws IOException { - final byte[] value1 = Bytes.toBytes("aaaa"); - final byte[] value2 = Bytes.toBytes("bbbb"); - final byte[] value3 = Bytes.toBytes("cccc"); - final byte[] value4 = Bytes.toBytes("dddd"); - - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) 
{ - - Put put2 = new Put(ROW); - put2.addColumn(FAMILY, QUALIFIER, value2); - table.put(put2); - - Put put3 = new Put(ROW); - put3.addColumn(FAMILY, QUALIFIER, value3); - - Delete delete = new Delete(ROW); - delete.addColumns(FAMILY, QUALIFIER); - - // cell = "bbbb", using "aaaa" to compare only LESS/LESS_OR_EQUAL/NOT_EQUAL - // turns out "match" - boolean ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value1).thenDelete(delete); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value1).thenDelete(delete); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value1).thenDelete(delete); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value1).thenDelete(delete); - assertTrue(ok); - table.put(put2); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value1).thenDelete(delete); - assertTrue(ok); - table.put(put2); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value1).thenDelete(delete); - assertTrue(ok); - - // cell = "cccc", using "dddd" to compare only LARGER/LARGER_OR_EQUAL/NOT_EQUAL - // turns out "match" - table.put(put3); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value4).thenDelete(delete); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value4).thenDelete(delete); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value4).thenDelete(delete); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value4).thenDelete(delete); - assertTrue(ok); 
- table.put(put3); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value4).thenDelete(delete); - assertTrue(ok); - table.put(put3); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value4).thenDelete(delete); - assertTrue(ok); - - // cell = "bbbb", using "bbbb" to compare only GREATER_OR_EQUAL/LESS_OR_EQUAL/EQUAL - // turns out "match" - table.put(put2); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER, value2).thenDelete(delete); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.NOT_EQUAL, value2).thenDelete(delete); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS, value2).thenDelete(delete); - assertFalse(ok); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.GREATER_OR_EQUAL, value2).thenDelete(delete); - assertTrue(ok); - table.put(put2); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.LESS_OR_EQUAL, value2).thenDelete(delete); - assertTrue(ok); - table.put(put2); - ok = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER) - .ifMatches(CompareOperator.EQUAL, value2).thenDelete(delete); - assertTrue(ok); - } - } - - /** - * Test ScanMetrics - */ - @Test - @SuppressWarnings({ "unused", "checkstyle:EmptyBlock" }) - public void testScanMetrics() throws Exception { - final TableName tableName = name.getTableName(); - - // Set up test table: - // Create table: - try (Table ht = TEST_UTIL.createMultiRegionTable(tableName, FAMILY)) { - int numOfRegions; - try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - numOfRegions = r.getStartKeys().length; - } - // Create 3 rows in the table, with rowkeys starting with "zzz*" so that - // scan are forced to hit all the regions. 
- Put put1 = new Put(Bytes.toBytes("zzz1")); - put1.addColumn(FAMILY, QUALIFIER, VALUE); - Put put2 = new Put(Bytes.toBytes("zzz2")); - put2.addColumn(FAMILY, QUALIFIER, VALUE); - Put put3 = new Put(Bytes.toBytes("zzz3")); - put3.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(Arrays.asList(put1, put2, put3)); - - Scan scan1 = new Scan(); - int numRecords = 0; - try (ResultScanner scanner = ht.getScanner(scan1)) { - for (Result result : scanner) { - numRecords++; - } - - LOG.info("test data has {} records.", numRecords); - - // by default, scan metrics collection is turned off - assertNull(scanner.getScanMetrics()); - } - - // turn on scan metrics - Scan scan2 = new Scan(); - scan2.setScanMetricsEnabled(true); - scan2.setCaching(numRecords + 1); - try (ResultScanner scanner = ht.getScanner(scan2)) { - for (Result result : scanner.next(numRecords - 1)) { - } - assertNotNull(scanner.getScanMetrics()); - } - - // set caching to 1, because metrics are collected in each roundtrip only - scan2 = new Scan(); - scan2.setScanMetricsEnabled(true); - scan2.setCaching(1); - try (ResultScanner scanner = ht.getScanner(scan2)) { - // per HBASE-5717, this should still collect even if you don't run all the way to - // the end of the scanner. So this is asking for 2 of the 3 rows we inserted. 
- for (Result result : scanner.next(numRecords - 1)) { - } - ScanMetrics scanMetrics = scanner.getScanMetrics(); - assertEquals("Did not access all the regions in the table", numOfRegions, - scanMetrics.countOfRegions.get()); - } - - // check byte counters - scan2 = new Scan(); - scan2.setScanMetricsEnabled(true); - scan2.setCaching(1); - try (ResultScanner scanner = ht.getScanner(scan2)) { - int numBytes = 0; - for (Result result : scanner) { - for (Cell cell : result.listCells()) { - numBytes += PrivateCellUtil.estimatedSerializedSizeOf(cell); - } - } - ScanMetrics scanMetrics = scanner.getScanMetrics(); - assertEquals("Did not count the result bytes", numBytes, - scanMetrics.countOfBytesInResults.get()); - } - - // check byte counters on a small scan - scan2 = new Scan(); - scan2.setScanMetricsEnabled(true); - scan2.setCaching(1); - scan2.setReadType(ReadType.PREAD); - try (ResultScanner scanner = ht.getScanner(scan2)) { - int numBytes = 0; - for (Result result : scanner) { - for (Cell cell : result.listCells()) { - numBytes += PrivateCellUtil.estimatedSerializedSizeOf(cell); - } - } - ScanMetrics scanMetrics = scanner.getScanMetrics(); - assertEquals("Did not count the result bytes", numBytes, - scanMetrics.countOfBytesInResults.get()); - } - - // now, test that the metrics are still collected even if you don't call close, but do - // run past the end of all the records - /** - * There seems to be a timing issue here. Comment out for now. Fix when time. 
Scan - * scanWithoutClose = new Scan(); scanWithoutClose.setCaching(1); - * scanWithoutClose.setScanMetricsEnabled(true); ResultScanner scannerWithoutClose = - * ht.getScanner(scanWithoutClose); for (Result result : scannerWithoutClose.next(numRecords + - * 1)) { } ScanMetrics scanMetricsWithoutClose = getScanMetrics(scanWithoutClose); - * assertEquals("Did not access all the regions in the table", numOfRegions, - * scanMetricsWithoutClose.countOfRegions.get()); - */ - - // finally, - // test that the metrics are collected correctly if you both run past all the records, - // AND close the scanner - Scan scanWithClose = new Scan(); - // make sure we can set caching up to the number of a scanned values - scanWithClose.setCaching(numRecords); - scanWithClose.setScanMetricsEnabled(true); - try (ResultScanner scannerWithClose = ht.getScanner(scanWithClose)) { - for (Result result : scannerWithClose.next(numRecords + 1)) { - } - scannerWithClose.close(); - ScanMetrics scanMetricsWithClose = scannerWithClose.getScanMetrics(); - assertEquals("Did not access all the regions in the table", numOfRegions, - scanMetricsWithClose.countOfRegions.get()); - } - } finally { - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * Tests that cache on write works all the way up from the client-side. Performs inserts, flushes, - * and compactions, verifying changes in the block cache along the way. 
- */ - @Test - public void testCacheOnWriteEvictOnClose() throws Exception { - final TableName tableName = name.getTableName(); - byte[] data = Bytes.toBytes("data"); - try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { - try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - // get the block cache and region - String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName(); - - HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); - HStore store = region.getStores().iterator().next(); - CacheConfig cacheConf = store.getCacheConfig(); - cacheConf.setCacheDataOnWrite(true); - cacheConf.setEvictOnClose(true); - BlockCache cache = cacheConf.getBlockCache().get(); - - // establish baseline stats - long startBlockCount = cache.getBlockCount(); - long startBlockHits = cache.getStats().getHitCount(); - long startBlockMiss = cache.getStats().getMissCount(); - - // wait till baseline is stable, (minimal 500 ms) - for (int i = 0; i < 5; i++) { - Thread.sleep(100); - if ( - startBlockCount != cache.getBlockCount() - || startBlockHits != cache.getStats().getHitCount() - || startBlockMiss != cache.getStats().getMissCount() - ) { - startBlockCount = cache.getBlockCount(); - startBlockHits = cache.getStats().getHitCount(); - startBlockMiss = cache.getStats().getMissCount(); - i = -1; - } - } - - // insert data - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, data); - table.put(put); - assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data)); - - // data was in memstore so don't expect any changes - assertEquals(startBlockCount, cache.getBlockCount()); - assertEquals(startBlockHits, cache.getStats().getHitCount()); - assertEquals(startBlockMiss, cache.getStats().getMissCount()); - - // flush the data - LOG.debug("Flushing cache"); - region.flush(true); - - // expect two more blocks in cache - DATA and ROOT_INDEX - // , no change in hits/misses - long 
expectedBlockCount = startBlockCount + 2; - long expectedBlockHits = startBlockHits; - long expectedBlockMiss = startBlockMiss; - assertEquals(expectedBlockCount, cache.getBlockCount()); - assertEquals(expectedBlockHits, cache.getStats().getHitCount()); - assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); - // read the data and expect same blocks, one new hit, no misses - assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data)); - assertEquals(expectedBlockCount, cache.getBlockCount()); - assertEquals(++expectedBlockHits, cache.getStats().getHitCount()); - assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); - // insert a second column, read the row, no new blocks, one new hit - byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER); - byte[] data2 = Bytes.add(data, data); - put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER2, data2); - table.put(put); - Result r = table.get(new Get(ROW)); - assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data)); - assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2)); - assertEquals(expectedBlockCount, cache.getBlockCount()); - assertEquals(++expectedBlockHits, cache.getStats().getHitCount()); - assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); - // flush, one new block - System.out.println("Flushing cache"); - region.flush(true); - - // + 1 for Index Block, +1 for data block - expectedBlockCount += 2; - assertEquals(expectedBlockCount, cache.getBlockCount()); - assertEquals(expectedBlockHits, cache.getStats().getHitCount()); - assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); - // compact, net minus two blocks, two hits, no misses - System.out.println("Compacting"); - assertEquals(2, store.getStorefilesCount()); - store.triggerMajorCompaction(); - region.compact(true); - store.closeAndArchiveCompactedFiles(); - waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max - assertEquals(1, store.getStorefilesCount()); - // evicted two data blocks 
and two index blocks and compaction does not cache new blocks - expectedBlockCount = 0; - assertEquals(expectedBlockCount, cache.getBlockCount()); - expectedBlockHits += 2; - assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); - assertEquals(expectedBlockHits, cache.getStats().getHitCount()); - // read the row, this should be a cache miss because we don't cache data - // blocks on compaction - r = table.get(new Get(ROW)); - assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data)); - assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2)); - expectedBlockCount += 1; // cached one data block - assertEquals(expectedBlockCount, cache.getBlockCount()); - assertEquals(expectedBlockHits, cache.getStats().getHitCount()); - assertEquals(++expectedBlockMiss, cache.getStats().getMissCount()); - } - } - } - - private void waitForStoreFileCount(HStore store, int count, int timeout) - throws InterruptedException { - long start = EnvironmentEdgeManager.currentTime(); - while ( - start + timeout > EnvironmentEdgeManager.currentTime() && store.getStorefilesCount() != count - ) { - Thread.sleep(100); - } - System.out.println("start=" + start + ", now=" + EnvironmentEdgeManager.currentTime() + ", cur=" - + store.getStorefilesCount()); - assertEquals(count, store.getStorefilesCount()); - } - - /** - * Tests the non cached version of getRegionLocator by moving a region. - */ - @Test - public void testNonCachedGetRegionLocation() throws Exception { - // Test Initialization. 
- final TableName tableName = name.getTableName(); - byte[] family1 = Bytes.toBytes("f1"); - byte[] family2 = Bytes.toBytes("f2"); - try (Table ignored = TEST_UTIL.createTable(tableName, new byte[][] { family1, family2 }, 10); - Admin admin = TEST_UTIL.getAdmin(); - RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - List allRegionLocations = locator.getAllRegionLocations(); - assertEquals(1, allRegionLocations.size()); - RegionInfo regionInfo = allRegionLocations.get(0).getRegion(); - ServerName addrBefore = allRegionLocations.get(0).getServerName(); - // Verify region location before move. - HRegionLocation addrCache = locator.getRegionLocation(regionInfo.getStartKey(), false); - HRegionLocation addrNoCache = locator.getRegionLocation(regionInfo.getStartKey(), true); - - assertEquals(addrBefore.getPort(), addrCache.getPort()); - assertEquals(addrBefore.getPort(), addrNoCache.getPort()); - - // Make sure more than one server. - if (TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size() <= 1) { - TEST_UTIL.getMiniHBaseCluster().startRegionServer(); - Waiter.waitFor(TEST_UTIL.getConfiguration(), 30000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size() > 1; - } - }); - } - - ServerName addrAfter = null; - // Now move the region to a different server. - for (int i = 0; i - < TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size(); i++) { - HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(i); - ServerName addr = regionServer.getServerName(); - if (addr.getPort() != addrBefore.getPort()) { - admin.move(regionInfo.getEncodedNameAsBytes(), addr); - // Wait for the region to move. - Thread.sleep(5000); - addrAfter = addr; - break; - } - } - - // Verify the region was moved. 
- addrCache = locator.getRegionLocation(regionInfo.getStartKey(), false); - addrNoCache = locator.getRegionLocation(regionInfo.getStartKey(), true); - assertNotNull(addrAfter); - assertTrue(addrAfter.getPort() != addrCache.getPort()); - assertEquals(addrAfter.getPort(), addrNoCache.getPort()); - } - } - - /** - * Tests getRegionsInRange by creating some regions over which a range of keys spans; then - * changing the key range. - */ - @Test - public void testGetRegionsInRange() throws Exception { - // Test Initialization. - byte[] startKey = Bytes.toBytes("ddc"); - byte[] endKey = Bytes.toBytes("mmm"); - TableName tableName = name.getTableName(); - TEST_UTIL.createMultiRegionTable(tableName, new byte[][] { FAMILY }, 10); - - int numOfRegions; - try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - numOfRegions = r.getStartKeys().length; - } - assertEquals(26, numOfRegions); - - // Get the regions in this range - List regionsList = getRegionsInRange(tableName, startKey, endKey); - assertEquals(10, regionsList.size()); - - // Change the start key - startKey = Bytes.toBytes("fff"); - regionsList = getRegionsInRange(tableName, startKey, endKey); - assertEquals(7, regionsList.size()); - - // Change the end key - endKey = Bytes.toBytes("nnn"); - regionsList = getRegionsInRange(tableName, startKey, endKey); - assertEquals(8, regionsList.size()); - - // Empty start key - regionsList = getRegionsInRange(tableName, HConstants.EMPTY_START_ROW, endKey); - assertEquals(13, regionsList.size()); - - // Empty end key - regionsList = getRegionsInRange(tableName, startKey, HConstants.EMPTY_END_ROW); - assertEquals(21, regionsList.size()); - - // Both start and end keys empty - regionsList = - getRegionsInRange(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); - assertEquals(26, regionsList.size()); - - // Change the end key to somewhere in the last block - endKey = Bytes.toBytes("zzz1"); - regionsList = getRegionsInRange(tableName, 
startKey, endKey); - assertEquals(21, regionsList.size()); - - // Change the start key to somewhere in the first block - startKey = Bytes.toBytes("aac"); - regionsList = getRegionsInRange(tableName, startKey, endKey); - assertEquals(26, regionsList.size()); - - // Make start and end key the same - startKey = Bytes.toBytes("ccc"); - endKey = Bytes.toBytes("ccc"); - regionsList = getRegionsInRange(tableName, startKey, endKey); - assertEquals(1, regionsList.size()); - } - - private List getRegionsInRange(TableName tableName, byte[] startKey, - byte[] endKey) throws IOException { - List regionsInRange = new ArrayList<>(); - byte[] currentKey = startKey; - final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW); - try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - do { - HRegionLocation regionLocation = r.getRegionLocation(currentKey); - regionsInRange.add(regionLocation); - currentKey = regionLocation.getRegion().getEndKey(); - } while ( - !Bytes.equals(currentKey, HConstants.EMPTY_END_ROW) - && (endKeyIsEndOfTable || Bytes.compareTo(currentKey, endKey) < 0) - ); - return regionsInRange; - } - } - - @Test - public void testJira6912() throws Exception { - final TableName tableName = name.getTableName(); - try (Table foo = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, 10)) { - - List puts = new ArrayList<>(); - for (int i = 0; i != 100; i++) { - Put put = new Put(Bytes.toBytes(i)); - put.addColumn(FAMILY, FAMILY, Bytes.toBytes(i)); - puts.add(put); - } - foo.put(puts); - // If i comment this out it works - TEST_UTIL.flush(); - - Scan scan = new Scan(); - scan.withStartRow(Bytes.toBytes(1)); - scan.withStopRow(Bytes.toBytes(3)); - scan.addColumn(FAMILY, FAMILY); - scan.setFilter( - new RowFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes(1)))); - - try (ResultScanner scanner = foo.getScanner(scan)) { - Result[] bar = scanner.next(100); - assertEquals(1, bar.length); - } - } - } - - 
@Test - public void testScan_NullQualifier() throws IOException { - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - Put put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - - put = new Put(ROW); - put.addColumn(FAMILY, null, VALUE); - table.put(put); - LOG.info("Row put"); - - Scan scan = new Scan(); - scan.addColumn(FAMILY, null); - - ResultScanner scanner = table.getScanner(scan); - Result[] bar = scanner.next(100); - assertEquals(1, bar.length); - assertEquals(1, bar[0].size()); - - scan = new Scan(); - scan.addFamily(FAMILY); - - scanner = table.getScanner(scan); - bar = scanner.next(100); - assertEquals(1, bar.length); - assertEquals(2, bar[0].size()); - } - } - - @Test - public void testNegativeTimestamp() throws IOException { - try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - - try { - Put put = new Put(ROW, -1); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - fail("Negative timestamps should not have been allowed"); - } catch (IllegalArgumentException ex) { - assertTrue(ex.getMessage().contains("negative")); - } - - try { - Put put = new Put(ROW); - long ts = -1; - put.addColumn(FAMILY, QUALIFIER, ts, VALUE); - table.put(put); - fail("Negative timestamps should not have been allowed"); - } catch (IllegalArgumentException ex) { - assertTrue(ex.getMessage().contains("negative")); - } - - try { - Delete delete = new Delete(ROW, -1); - table.delete(delete); - fail("Negative timestamps should not have been allowed"); - } catch (IllegalArgumentException ex) { - assertTrue(ex.getMessage().contains("negative")); - } - - try { - Delete delete = new Delete(ROW); - delete.addFamily(FAMILY, -1); - table.delete(delete); - fail("Negative timestamps should not have been allowed"); - } catch (IllegalArgumentException ex) { - assertTrue(ex.getMessage().contains("negative")); - } - - try { - Scan scan = new Scan(); - scan.setTimeRange(-1, 1); - table.getScanner(scan); - 
fail("Negative timestamps should not have been allowed"); - } catch (IllegalArgumentException ex) { - assertTrue(ex.getMessage().contains("negative")); - } - - // KeyValue should allow negative timestamps for backwards compat. Otherwise, if the user - // already has negative timestamps in cluster data, HBase won't be able to handle that - try { - new KeyValue(Bytes.toBytes(42), Bytes.toBytes(42), Bytes.toBytes(42), -1, - Bytes.toBytes(42)); - } catch (IllegalArgumentException ex) { - fail("KeyValue SHOULD allow negative timestamps"); - } - - } - } - - @Test - public void testRawScanRespectsVersions() throws Exception { - final TableName tableName = name.getTableName(); - try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { - byte[] row = Bytes.toBytes("row"); - - // put the same row 4 times, with different values - Put p = new Put(row); - p.addColumn(FAMILY, QUALIFIER, 10, VALUE); - table.put(p); - p = new Put(row); - p.addColumn(FAMILY, QUALIFIER, 11, ArrayUtils.add(VALUE, (byte) 2)); - table.put(p); - - p = new Put(row); - p.addColumn(FAMILY, QUALIFIER, 12, ArrayUtils.add(VALUE, (byte) 3)); - table.put(p); - - p = new Put(row); - p.addColumn(FAMILY, QUALIFIER, 13, ArrayUtils.add(VALUE, (byte) 4)); - table.put(p); - - int versions = 4; - Scan s = new Scan().withStartRow(row); - // get all the possible versions - s.readAllVersions(); - s.setRaw(true); - - try (ResultScanner scanner = table.getScanner(s)) { - int count = 0; - for (Result r : scanner) { - assertEquals("Found an unexpected number of results for the row!", versions, - r.listCells().size()); - count++; - } - assertEquals("Found more than a single row when raw scanning the table with a single row!", - 1, count); - } - - // then if we decrease the number of versions, but keep the scan raw, we should see exactly - // that number of versions - versions = 2; - s.readVersions(versions); - try (ResultScanner scanner = table.getScanner(s)) { - int count = 0; - for (Result r : scanner) { - 
assertEquals("Found an unexpected number of results for the row!", versions, - r.listCells().size()); - count++; - } - assertEquals("Found more than a single row when raw scanning the table with a single row!", - 1, count); - } - - // finally, if we turn off raw scanning, but max out the number of versions, we should go back - // to seeing just three - versions = 3; - s.readVersions(versions); - try (ResultScanner scanner = table.getScanner(s)) { - int count = 0; - for (Result r : scanner) { - assertEquals("Found an unexpected number of results for the row!", versions, - r.listCells().size()); - count++; - } - assertEquals("Found more than a single row when raw scanning the table with a single row!", - 1, count); - } - - } - TEST_UTIL.deleteTable(tableName); - } - - @Test - public void testEmptyFilterList() throws Exception { - // Test Initialization. - final TableName tableName = name.getTableName(); - try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { - - // Insert one row each region - Put put = new Put(Bytes.toBytes("row")); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - - List scanResults = new LinkedList<>(); - Scan scan = new Scan(); - scan.setFilter(new FilterList()); - try (ResultScanner scanner = table.getScanner(scan)) { - for (Result r : scanner) { - scanResults.add(r); - } - } - assertEquals(1, scanResults.size()); - Get g = new Get(Bytes.toBytes("row")); - g.setFilter(new FilterList()); - Result getResult = table.get(g); - Result scanResult = scanResults.get(0); - assertEquals(scanResult.rawCells().length, getResult.rawCells().length); - for (int i = 0; i != scanResult.rawCells().length; ++i) { - Cell scanCell = scanResult.rawCells()[i]; - Cell getCell = getResult.rawCells()[i]; - assertEquals(0, Bytes.compareTo(CellUtil.cloneRow(scanCell), CellUtil.cloneRow(getCell))); - assertEquals(0, - Bytes.compareTo(CellUtil.cloneFamily(scanCell), CellUtil.cloneFamily(getCell))); - assertEquals(0, - 
Bytes.compareTo(CellUtil.cloneQualifier(scanCell), CellUtil.cloneQualifier(getCell))); - assertEquals(0, - Bytes.compareTo(CellUtil.cloneValue(scanCell), CellUtil.cloneValue(getCell))); - } - } - } - - @Test - public void testSmallScan() throws Exception { - // Test Initialization. - final TableName tableName = name.getTableName(); - try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { - - // Insert one row each region - int insertNum = 10; - for (int i = 0; i < 10; i++) { - Put put = new Put(Bytes.toBytes("row" + String.format("%03d", i))); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - } - - // normal scan - try (ResultScanner scanner = table.getScanner(new Scan())) { - int count = 0; - for (Result r : scanner) { - assertFalse(r.isEmpty()); - count++; - } - assertEquals(insertNum, count); - } - - // small scan - Scan scan = new Scan().withStartRow(HConstants.EMPTY_START_ROW) - .withStopRow(HConstants.EMPTY_END_ROW, true); - scan.setReadType(ReadType.PREAD); - scan.setCaching(2); - try (ResultScanner scanner = table.getScanner(scan)) { - int count = 0; - for (Result r : scanner) { - assertFalse(r.isEmpty()); - count++; - } - assertEquals(insertNum, count); - } - } - } - - @Test - public void testSuperSimpleWithReverseScan() throws Exception { - final TableName tableName = name.getTableName(); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - Put put = new Put(Bytes.toBytes("0-b11111-0000000000000000000")); - put.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(put); - put = new Put(Bytes.toBytes("0-b11111-0000000000000000002")); - put.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(put); - put = new Put(Bytes.toBytes("0-b11111-0000000000000000004")); - put.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(put); - put = new Put(Bytes.toBytes("0-b11111-0000000000000000006")); - put.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(put); - put = new Put(Bytes.toBytes("0-b11111-0000000000000000008")); - put.addColumn(FAMILY, QUALIFIER, 
VALUE); - ht.put(put); - put = new Put(Bytes.toBytes("0-b22222-0000000000000000001")); - put.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(put); - put = new Put(Bytes.toBytes("0-b22222-0000000000000000003")); - put.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(put); - put = new Put(Bytes.toBytes("0-b22222-0000000000000000005")); - put.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(put); - put = new Put(Bytes.toBytes("0-b22222-0000000000000000007")); - put.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(put); - put = new Put(Bytes.toBytes("0-b22222-0000000000000000009")); - put.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(put); - Scan scan = new Scan().withStartRow(Bytes.toBytes("0-b11111-9223372036854775807")) - .withStopRow(Bytes.toBytes("0-b11111-0000000000000000000"), true); - scan.setReversed(true); - try (ResultScanner scanner = ht.getScanner(scan)) { - Result result = scanner.next(); - assertTrue(Bytes.equals(result.getRow(), Bytes.toBytes("0-b11111-0000000000000000008"))); - } - } - } - - @Test - public void testFiltersWithReverseScan() throws Exception { - final TableName tableName = name.getTableName(); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - byte[][] ROWS = makeN(ROW, 10); - byte[][] QUALIFIERS = - { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), - Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), - Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), - Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), - Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; - for (int i = 0; i < 10; i++) { - Put put = new Put(ROWS[i]); - put.addColumn(FAMILY, QUALIFIERS[i], VALUE); - ht.put(put); - } - Scan scan = new Scan(); - scan.setReversed(true); - scan.addFamily(FAMILY); - Filter filter = - new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator("col[1-5]")); - scan.setFilter(filter); - try (ResultScanner scanner = ht.getScanner(scan)) { - int expectedIndex = 5; - for (Result result : scanner) { - assertEquals(1, result.size()); - 
Cell c = result.rawCells()[0]; - assertTrue(Bytes.equals(c.getRowArray(), c.getRowOffset(), c.getRowLength(), - ROWS[expectedIndex], 0, ROWS[expectedIndex].length)); - assertTrue( - Bytes.equals(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength(), - QUALIFIERS[expectedIndex], 0, QUALIFIERS[expectedIndex].length)); - expectedIndex--; - } - assertEquals(0, expectedIndex); - } - } - } - - @Test - public void testKeyOnlyFilterWithReverseScan() throws Exception { - final TableName tableName = name.getTableName(); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - byte[][] ROWS = makeN(ROW, 10); - byte[][] QUALIFIERS = - { Bytes.toBytes("col0--"), Bytes.toBytes("col1--"), - Bytes.toBytes("col2--"), Bytes.toBytes("col3--"), - Bytes.toBytes("col4--"), Bytes.toBytes("col5--"), - Bytes.toBytes("col6--"), Bytes.toBytes("col7--"), - Bytes.toBytes("col8--"), Bytes.toBytes("col9--") }; - for (int i = 0; i < 10; i++) { - Put put = new Put(ROWS[i]); - put.addColumn(FAMILY, QUALIFIERS[i], VALUE); - ht.put(put); - } - Scan scan = new Scan(); - scan.setReversed(true); - scan.addFamily(FAMILY); - Filter filter = new KeyOnlyFilter(true); - scan.setFilter(filter); - try (ResultScanner ignored = ht.getScanner(scan)) { - int count = 0; - for (Result result : ht.getScanner(scan)) { - assertEquals(1, result.size()); - assertEquals(Bytes.SIZEOF_INT, result.rawCells()[0].getValueLength()); - assertEquals(VALUE.length, Bytes.toInt(CellUtil.cloneValue(result.rawCells()[0]))); - count++; - } - assertEquals(10, count); - } - } - } - - /** - * Test simple table and non-existent row cases. 
- */ - @Test - public void testSimpleMissingWithReverseScan() throws Exception { - final TableName tableName = name.getTableName(); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - byte[][] ROWS = makeN(ROW, 4); - - // Try to get a row on an empty table - Scan scan = new Scan(); - scan.setReversed(true); - Result result = getSingleScanResult(ht, scan); - assertNullResult(result); - - scan = new Scan().withStartRow(ROWS[0]); - scan.setReversed(true); - result = getSingleScanResult(ht, scan); - assertNullResult(result); - - scan = new Scan().withStartRow(ROWS[0]).withStopRow(ROWS[1], true); - scan.setReversed(true); - result = getSingleScanResult(ht, scan); - assertNullResult(result); - - scan = new Scan(); - scan.setReversed(true); - scan.addFamily(FAMILY); - result = getSingleScanResult(ht, scan); - assertNullResult(result); - - scan = new Scan(); - scan.setReversed(true); - scan.addColumn(FAMILY, QUALIFIER); - result = getSingleScanResult(ht, scan); - assertNullResult(result); - - // Insert a row - - Put put = new Put(ROWS[2]); - put.addColumn(FAMILY, QUALIFIER, VALUE); - ht.put(put); - - // Make sure we can scan the row - scan = new Scan(); - scan.setReversed(true); - result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); - - scan = new Scan().withStartRow(ROWS[3]).withStopRow(ROWS[0], true); - scan.setReversed(true); - result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); - - scan = new Scan().withStartRow(ROWS[2]).withStopRow(ROWS[1], true); - scan.setReversed(true); - result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROWS[2], FAMILY, QUALIFIER, VALUE); - - // Try to scan empty rows around it - // Introduced MemStore#shouldSeekForReverseScan to fix the following - scan = new Scan().withStartRow(ROWS[1]); - scan.setReversed(true); - result = getSingleScanResult(ht, scan); - assertNullResult(result); - } - } - - @Test - public 
void testNullWithReverseScan() throws Exception { - final TableName tableName = name.getTableName(); - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY)) { - // Null qualifier (should work) - Put put = new Put(ROW); - put.addColumn(FAMILY, null, VALUE); - ht.put(put); - scanTestNull(ht, ROW, FAMILY, VALUE, true); - Delete delete = new Delete(ROW); - delete.addColumns(FAMILY, null); - ht.delete(delete); - } - - // Use a new table - try (Table ht = - TEST_UTIL.createTable(TableName.valueOf(name.getTableName().toString() + "2"), FAMILY)) { - // Empty qualifier, byte[0] instead of null (should work) - Put put = new Put(ROW); - put.addColumn(FAMILY, HConstants.EMPTY_BYTE_ARRAY, VALUE); - ht.put(put); - scanTestNull(ht, ROW, FAMILY, VALUE, true); - TEST_UTIL.flush(); - scanTestNull(ht, ROW, FAMILY, VALUE, true); - Delete delete = new Delete(ROW); - delete.addColumns(FAMILY, HConstants.EMPTY_BYTE_ARRAY); - ht.delete(delete); - // Null value - put = new Put(ROW); - put.addColumn(FAMILY, QUALIFIER, null); - ht.put(put); - Scan scan = new Scan(); - scan.setReversed(true); - scan.addColumn(FAMILY, QUALIFIER); - Result result = getSingleScanResult(ht, scan); - assertSingleResult(result, ROW, FAMILY, QUALIFIER, null); - } - } - - @Test - @SuppressWarnings("checkstyle:MethodLength") - public void testDeletesWithReverseScan() throws Exception { - final TableName tableName = name.getTableName(); - byte[][] ROWS = makeNAscii(ROW, 6); - byte[][] FAMILIES = makeNAscii(FAMILY, 3); - byte[][] VALUES = makeN(VALUE, 5); - long[] ts = { 1000, 2000, 3000, 4000, 5000 }; - try (Table ht = TEST_UTIL.createTable(tableName, FAMILIES, 3)) { - - Put put = new Put(ROW); - put.addColumn(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]); - put.addColumn(FAMILIES[0], QUALIFIER, ts[1], VALUES[1]); - ht.put(put); - - Delete delete = new Delete(ROW); - delete.addFamily(FAMILIES[0], ts[0]); - ht.delete(delete); - - Scan scan = new Scan().withStartRow(ROW); - scan.setReversed(true); - 
scan.addFamily(FAMILIES[0]); - scan.readVersions(Integer.MAX_VALUE); - Result result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[] { ts[1] }, - new byte[][] { VALUES[1] }, 0, 0); - - // Test delete latest version - put = new Put(ROW); - put.addColumn(FAMILIES[0], QUALIFIER, ts[4], VALUES[4]); - put.addColumn(FAMILIES[0], QUALIFIER, ts[2], VALUES[2]); - put.addColumn(FAMILIES[0], QUALIFIER, ts[3], VALUES[3]); - put.addColumn(FAMILIES[0], null, ts[4], VALUES[4]); - put.addColumn(FAMILIES[0], null, ts[2], VALUES[2]); - put.addColumn(FAMILIES[0], null, ts[3], VALUES[3]); - ht.put(put); - - delete = new Delete(ROW); - delete.addColumn(FAMILIES[0], QUALIFIER); // ts[4] - ht.delete(delete); - - scan = new Scan().withStartRow(ROW); - scan.setReversed(true); - scan.addColumn(FAMILIES[0], QUALIFIER); - scan.readVersions(Integer.MAX_VALUE); - result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[] { ts[1], ts[2], ts[3] }, - new byte[][] { VALUES[1], VALUES[2], VALUES[3] }, 0, 2); - - // Test for HBASE-1847 - delete = new Delete(ROW); - delete.addColumn(FAMILIES[0], null); - ht.delete(delete); - - // Cleanup null qualifier - delete = new Delete(ROW); - delete.addColumns(FAMILIES[0], null); - ht.delete(delete); - - // Expected client behavior might be that you can re-put deleted values - // But alas, this is not to be. We can't put them back in either case. 
- - put = new Put(ROW); - put.addColumn(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]); - put.addColumn(FAMILIES[0], QUALIFIER, ts[4], VALUES[4]); - ht.put(put); - - // The Scanner returns the previous values, the expected-naive-unexpected - // behavior - - scan = new Scan().withStartRow(ROW); - scan.setReversed(true); - scan.addFamily(FAMILIES[0]); - scan.readVersions(Integer.MAX_VALUE); - result = getSingleScanResult(ht, scan); - assertNResult(result, ROW, FAMILIES[0], QUALIFIER, new long[] { ts[1], ts[2], ts[3] }, - new byte[][] { VALUES[1], VALUES[2], VALUES[3] }, 0, 2); - - // Test deleting an entire family from one row but not the other various - // ways - - put = new Put(ROWS[0]); - put.addColumn(FAMILIES[1], QUALIFIER, ts[0], VALUES[0]); - put.addColumn(FAMILIES[1], QUALIFIER, ts[1], VALUES[1]); - put.addColumn(FAMILIES[2], QUALIFIER, ts[2], VALUES[2]); - put.addColumn(FAMILIES[2], QUALIFIER, ts[3], VALUES[3]); - ht.put(put); - - put = new Put(ROWS[1]); - put.addColumn(FAMILIES[1], QUALIFIER, ts[0], VALUES[0]); - put.addColumn(FAMILIES[1], QUALIFIER, ts[1], VALUES[1]); - put.addColumn(FAMILIES[2], QUALIFIER, ts[2], VALUES[2]); - put.addColumn(FAMILIES[2], QUALIFIER, ts[3], VALUES[3]); - ht.put(put); - - put = new Put(ROWS[2]); - put.addColumn(FAMILIES[1], QUALIFIER, ts[0], VALUES[0]); - put.addColumn(FAMILIES[1], QUALIFIER, ts[1], VALUES[1]); - put.addColumn(FAMILIES[2], QUALIFIER, ts[2], VALUES[2]); - put.addColumn(FAMILIES[2], QUALIFIER, ts[3], VALUES[3]); - ht.put(put); - - delete = new Delete(ROWS[0]); - delete.addFamily(FAMILIES[2]); - ht.delete(delete); - - delete = new Delete(ROWS[1]); - delete.addColumns(FAMILIES[1], QUALIFIER); - ht.delete(delete); - - delete = new Delete(ROWS[2]); - delete.addColumn(FAMILIES[1], QUALIFIER); - delete.addColumn(FAMILIES[1], QUALIFIER); - delete.addColumn(FAMILIES[2], QUALIFIER); - ht.delete(delete); - - scan = new Scan().withStartRow(ROWS[0]); - scan.setReversed(true); - scan.addFamily(FAMILIES[1]); - 
scan.addFamily(FAMILIES[2]); - scan.readVersions(Integer.MAX_VALUE); - result = getSingleScanResult(ht, scan); - assertEquals("Expected 2 keys but received " + result.size(), 2, result.size()); - assertNResult(result, ROWS[0], FAMILIES[1], QUALIFIER, new long[] { ts[0], ts[1] }, - new byte[][] { VALUES[0], VALUES[1] }, 0, 1); - - scan = new Scan().withStartRow(ROWS[1]); - scan.setReversed(true); - scan.addFamily(FAMILIES[1]); - scan.addFamily(FAMILIES[2]); - scan.readVersions(Integer.MAX_VALUE); - result = getSingleScanResult(ht, scan); - assertEquals("Expected 2 keys but received " + result.size(), 2, result.size()); - - scan = new Scan().withStartRow(ROWS[2]); - scan.setReversed(true); - scan.addFamily(FAMILIES[1]); - scan.addFamily(FAMILIES[2]); - scan.readVersions(Integer.MAX_VALUE); - result = getSingleScanResult(ht, scan); - assertEquals(1, result.size()); - assertNResult(result, ROWS[2], FAMILIES[2], QUALIFIER, new long[] { ts[2] }, - new byte[][] { VALUES[2] }, 0, 0); - - // Test if we delete the family first in one row (HBASE-1541) - - delete = new Delete(ROWS[3]); - delete.addFamily(FAMILIES[1]); - ht.delete(delete); - - put = new Put(ROWS[3]); - put.addColumn(FAMILIES[2], QUALIFIER, VALUES[0]); - ht.put(put); - - put = new Put(ROWS[4]); - put.addColumn(FAMILIES[1], QUALIFIER, VALUES[1]); - put.addColumn(FAMILIES[2], QUALIFIER, VALUES[2]); - ht.put(put); - - scan = new Scan().withStartRow(ROWS[4]); - scan.setReversed(true); - scan.addFamily(FAMILIES[1]); - scan.addFamily(FAMILIES[2]); - scan.readVersions(Integer.MAX_VALUE); - ResultScanner scanner = ht.getScanner(scan); - result = scanner.next(); - assertEquals("Expected 2 keys but received " + result.size(), 2, result.size()); - assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]), ROWS[4])); - assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[1]), ROWS[4])); - assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[0]), VALUES[1])); - 
assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[1]), VALUES[2])); - result = scanner.next(); - assertEquals("Expected 1 key but received " + result.size(), 1, result.size()); - assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]), ROWS[3])); - assertTrue(Bytes.equals(CellUtil.cloneValue(result.rawCells()[0]), VALUES[0])); - scanner.close(); - } - } - - /** - * Tests reversed scan under multi regions - */ - @Test - public void testReversedScanUnderMultiRegions() throws Exception { - // Test Initialization. - final TableName tableName = name.getTableName(); - byte[] maxByteArray = ConnectionUtils.MAX_BYTE_ARRAY; - byte[][] splitRows = new byte[][] { Bytes.toBytes("005"), - Bytes.add(Bytes.toBytes("005"), Bytes.multiple(maxByteArray, 16)), Bytes.toBytes("006"), - Bytes.add(Bytes.toBytes("006"), Bytes.multiple(maxByteArray, 8)), Bytes.toBytes("007"), - Bytes.add(Bytes.toBytes("007"), Bytes.multiple(maxByteArray, 4)), Bytes.toBytes("008"), - Bytes.multiple(maxByteArray, 2) }; - try (Table table = TEST_UTIL.createTable(tableName, FAMILY, splitRows)) { - TEST_UTIL.waitUntilAllRegionsAssigned(table.getName()); - - try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - assertEquals(splitRows.length + 1, l.getAllRegionLocations().size()); - } - // Insert one row each region - int insertNum = splitRows.length; - for (byte[] splitRow : splitRows) { - Put put = new Put(splitRow); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - } - - // scan forward - try (ResultScanner scanner = table.getScanner(new Scan())) { - int count = 0; - for (Result r : scanner) { - assertFalse(r.isEmpty()); - count++; - } - assertEquals(insertNum, count); - } - - // scan backward - Scan scan = new Scan(); - scan.setReversed(true); - try (ResultScanner scanner = table.getScanner(scan)) { - int count = 0; - byte[] lastRow = null; - for (Result r : scanner) { - assertFalse(r.isEmpty()); - count++; - byte[] thisRow = r.getRow(); - if 
(lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" - + Bytes.toString(thisRow), Bytes.compareTo(thisRow, lastRow) < 0); - } - lastRow = thisRow; - } - assertEquals(insertNum, count); - } - } - } - - /** - * Tests reversed scan under multi regions - */ - @Test - public void testSmallReversedScanUnderMultiRegions() throws Exception { - // Test Initialization. - final TableName tableName = name.getTableName(); - byte[][] splitRows = new byte[][] { Bytes.toBytes("000"), Bytes.toBytes("002"), - Bytes.toBytes("004"), Bytes.toBytes("006"), Bytes.toBytes("008"), Bytes.toBytes("010") }; - try (Table table = TEST_UTIL.createTable(tableName, FAMILY, splitRows)) { - TEST_UTIL.waitUntilAllRegionsAssigned(table.getName()); - - try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - assertEquals(splitRows.length + 1, l.getAllRegionLocations().size()); - } - for (byte[] splitRow : splitRows) { - Put put = new Put(splitRow); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - - byte[] nextRow = Bytes.copy(splitRow); - nextRow[nextRow.length - 1]++; - - put = new Put(nextRow); - put.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(put); - } - - // scan forward - try (ResultScanner scanner = table.getScanner(new Scan())) { - int count = 0; - for (Result r : scanner) { - assertTrue(!r.isEmpty()); - count++; - } - assertEquals(12, count); - } - - reverseScanTest(table, ReadType.STREAM); - reverseScanTest(table, ReadType.PREAD); - reverseScanTest(table, ReadType.DEFAULT); - } - } - - private void reverseScanTest(Table table, ReadType readType) throws IOException { - // scan backward - Scan scan = new Scan(); - scan.setReversed(true); - try (ResultScanner scanner = table.getScanner(scan)) { - int count = 0; - byte[] lastRow = null; - for (Result r : scanner) { - assertTrue(!r.isEmpty()); - count++; - byte[] thisRow = r.getRow(); - if (lastRow != null) { - assertTrue("Error scan order, last row= " + 
Bytes.toString(lastRow) + ",this row=" - + Bytes.toString(thisRow), Bytes.compareTo(thisRow, lastRow) < 0); - } - lastRow = thisRow; - } - assertEquals(12, count); - } - - scan = new Scan(); - scan.setReadType(readType); - scan.setReversed(true); - scan.withStartRow(Bytes.toBytes("002")); - try (ResultScanner scanner = table.getScanner(scan)) { - int count = 0; - byte[] lastRow = null; - for (Result r : scanner) { - assertTrue(!r.isEmpty()); - count++; - byte[] thisRow = r.getRow(); - if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" - + Bytes.toString(thisRow), Bytes.compareTo(thisRow, lastRow) < 0); - } - lastRow = thisRow; - } - assertEquals(3, count); // 000 001 002 - } - - scan = new Scan(); - scan.setReadType(readType); - scan.setReversed(true); - scan.withStartRow(Bytes.toBytes("002")); - scan.withStopRow(Bytes.toBytes("000")); - try (ResultScanner scanner = table.getScanner(scan)) { - int count = 0; - byte[] lastRow = null; - for (Result r : scanner) { - assertFalse(r.isEmpty()); - count++; - byte[] thisRow = r.getRow(); - if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" - + Bytes.toString(thisRow), Bytes.compareTo(thisRow, lastRow) < 0); - } - lastRow = thisRow; - } - assertEquals(2, count); // 001 002 - } - - scan = new Scan(); - scan.setReadType(readType); - scan.setReversed(true); - scan.withStartRow(Bytes.toBytes("001")); - try (ResultScanner scanner = table.getScanner(scan)) { - int count = 0; - byte[] lastRow = null; - for (Result r : scanner) { - assertFalse(r.isEmpty()); - count++; - byte[] thisRow = r.getRow(); - if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" - + Bytes.toString(thisRow), Bytes.compareTo(thisRow, lastRow) < 0); - } - lastRow = thisRow; - } - assertEquals(2, count); // 000 001 - } - - scan = new Scan(); - scan.setReadType(readType); - scan.setReversed(true); 
- scan.withStartRow(Bytes.toBytes("000")); - try (ResultScanner scanner = table.getScanner(scan)) { - int count = 0; - byte[] lastRow = null; - for (Result r : scanner) { - assertFalse(r.isEmpty()); - count++; - byte[] thisRow = r.getRow(); - if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" - + Bytes.toString(thisRow), Bytes.compareTo(thisRow, lastRow) < 0); - } - lastRow = thisRow; - } - assertEquals(1, count); // 000 - } - - scan = new Scan(); - scan.setReadType(readType); - scan.setReversed(true); - scan.withStartRow(Bytes.toBytes("006")); - scan.withStopRow(Bytes.toBytes("002")); - try (ResultScanner scanner = table.getScanner(scan)) { - int count = 0; - byte[] lastRow = null; - for (Result r : scanner) { - assertFalse(r.isEmpty()); - count++; - byte[] thisRow = r.getRow(); - if (lastRow != null) { - assertTrue("Error scan order, last row= " + Bytes.toString(lastRow) + ",this row=" - + Bytes.toString(thisRow), Bytes.compareTo(thisRow, lastRow) < 0); - } - lastRow = thisRow; - } - assertEquals(4, count); // 003 004 005 006 - } - } - - @Test - public void testFilterAllRecords() throws IOException { - Scan scan = new Scan(); - scan.setBatch(1); - scan.setCaching(1); - // Filter out any records - scan.setFilter(new FilterList(new FirstKeyOnlyFilter(), new InclusiveStopFilter(new byte[0]))); - try (Table table = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { - try (ResultScanner s = table.getScanner(scan)) { - assertNull(s.next()); - } - } - } - - @Test - public void testCellSizeLimit() throws IOException { - final TableName tableName = name.getTableName(); - TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setValue(HRegion.HBASE_MAX_CELL_SIZE_KEY, Integer.toString(10 * 1024)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); - try (Admin admin = TEST_UTIL.getAdmin()) { - admin.createTable(tableDescriptor); - } - // Will succeed - try 
(Table t = TEST_UTIL.getConnection().getTable(tableName)) { - t.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(0L))); - t.increment(new Increment(ROW).addColumn(FAMILY, QUALIFIER, 1L)); - } - // Will succeed - try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { - t.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, new byte[9 * 1024])); - } - // Will fail - try (Table t = TEST_UTIL.getConnection().getTable(tableName)) { - try { - t.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, new byte[10 * 1024])); - fail("Oversize cell failed to trigger exception"); - } catch (IOException e) { - // expected - } - try { - t.append(new Append(ROW).addColumn(FAMILY, QUALIFIER, new byte[2 * 1024])); - fail("Oversize cell failed to trigger exception"); - } catch (IOException e) { - // expected - } - } - } - - @Test - public void testCellSizeNoLimit() throws IOException { - final TableName tableName = name.getTableName(); - TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) - .setValue(HRegion.HBASE_MAX_CELL_SIZE_KEY, Integer.toString(0)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); - - try (Admin admin = TEST_UTIL.getAdmin()) { - admin.createTable(tableDescriptor); - } - - // Will succeed - try (Table ht = TEST_UTIL.getConnection().getTable(tableName)) { - ht.put( - new Put(ROW).addColumn(FAMILY, QUALIFIER, new byte[HRegion.DEFAULT_MAX_CELL_SIZE - 1024])); - ht.append(new Append(ROW).addColumn(FAMILY, QUALIFIER, new byte[1024 + 1])); - } - } - - @Test - public void testDeleteSpecifiedVersionOfSpecifiedColumn() throws Exception { - final TableName tableName = name.getTableName(); - - byte[][] VALUES = makeN(VALUE, 5); - long[] ts = { 1000, 2000, 3000, 4000, 5000 }; - - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 5)) { - - Put put = new Put(ROW); - // Put version 1000,2000,3000,4000 of column FAMILY:QUALIFIER - for (int t = 0; t < 4; t++) { - put.addColumn(FAMILY, QUALIFIER, ts[t], VALUES[t]); - } - 
ht.put(put); - - Delete delete = new Delete(ROW); - // Delete version 3000 of column FAMILY:QUALIFIER - delete.addColumn(FAMILY, QUALIFIER, ts[2]); - ht.delete(delete); - - Get get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readVersions(Integer.MAX_VALUE); - Result result = ht.get(get); - // verify version 1000,2000,4000 remains for column FAMILY:QUALIFIER - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[1], ts[3] }, - new byte[][] { VALUES[0], VALUES[1], VALUES[3] }, 0, 2); - - delete = new Delete(ROW); - // Delete a version 5000 of column FAMILY:QUALIFIER which didn't exist - delete.addColumn(FAMILY, QUALIFIER, ts[4]); - ht.delete(delete); - - get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readVersions(Integer.MAX_VALUE); - result = ht.get(get); - // verify version 1000,2000,4000 remains for column FAMILY:QUALIFIER - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[1], ts[3] }, - new byte[][] { VALUES[0], VALUES[1], VALUES[3] }, 0, 2); - } - } - - @Test - public void testDeleteLatestVersionOfSpecifiedColumn() throws Exception { - final TableName tableName = name.getTableName(); - byte[][] VALUES = makeN(VALUE, 5); - long[] ts = { 1000, 2000, 3000, 4000, 5000 }; - try (Table ht = TEST_UTIL.createTable(tableName, FAMILY, 5)) { - Put put = new Put(ROW); - // Put version 1000,2000,3000,4000 of column FAMILY:QUALIFIER - for (int t = 0; t < 4; t++) { - put.addColumn(FAMILY, QUALIFIER, ts[t], VALUES[t]); - } - ht.put(put); - - Delete delete = new Delete(ROW); - // Delete latest version of column FAMILY:QUALIFIER - delete.addColumn(FAMILY, QUALIFIER); - ht.delete(delete); - - Get get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readVersions(Integer.MAX_VALUE); - Result result = ht.get(get); - // verify version 1000,2000,3000 remains for column FAMILY:QUALIFIER - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[1], ts[2] }, - new byte[][] { VALUES[0], VALUES[1], 
VALUES[2] }, 0, 2); - - delete = new Delete(ROW); - // Delete two latest version of column FAMILY:QUALIFIER - delete.addColumn(FAMILY, QUALIFIER); - delete.addColumn(FAMILY, QUALIFIER); - ht.delete(delete); - - get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readVersions(Integer.MAX_VALUE); - result = ht.get(get); - // verify version 1000 remains for column FAMILY:QUALIFIER - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0] }, - new byte[][] { VALUES[0] }, 0, 0); - - put = new Put(ROW); - // Put a version 5000 of column FAMILY:QUALIFIER - put.addColumn(FAMILY, QUALIFIER, ts[4], VALUES[4]); - ht.put(put); - - get = new Get(ROW); - get.addColumn(FAMILY, QUALIFIER); - get.readVersions(Integer.MAX_VALUE); - result = ht.get(get); - // verify version 1000,5000 remains for column FAMILY:QUALIFIER - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[0], ts[4] }, - new byte[][] { VALUES[0], VALUES[4] }, 0, 1); - } - } - - /** - * Test for HBASE-17125 - */ - @Test - public void testReadWithFilter() throws Exception { - final TableName tableName = name.getTableName(); - try (Table table = TEST_UTIL.createTable(tableName, FAMILY, 3)) { - - byte[] VALUEA = Bytes.toBytes("value-a"); - byte[] VALUEB = Bytes.toBytes("value-b"); - long[] ts = { 1000, 2000, 3000, 4000 }; - - Put put = new Put(ROW); - // Put version 1000,2000,3000,4000 of column FAMILY:QUALIFIER - for (int t = 0; t <= 3; t++) { - if (t <= 1) { - put.addColumn(FAMILY, QUALIFIER, ts[t], VALUEA); - } else { - put.addColumn(FAMILY, QUALIFIER, ts[t], VALUEB); - } - } - table.put(put); - - Scan scan = new Scan() - .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) - .readVersions(3); - ResultScanner scanner = table.getScanner(scan); - Result result = scanner.next(); - // ts[0] has gone from user view. 
Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, - 0, 0); - - Get get = new Get(ROW) - .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) - .readVersions(3); - result = table.get(get); - // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, - 0, 0); - - // Test with max versions 1, it should still read ts[1] - scan = new Scan() - .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) - .readVersions(1); - scanner = table.getScanner(scan); - result = scanner.next(); - // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, - 0, 0); - - // Test with max versions 1, it should still read ts[1] - get = new Get(ROW) - .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) - .readVersions(1); - result = table.get(get); - // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, - 0, 0); - - // Test with max versions 5, it should still read ts[1] - scan = new Scan() - .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) - .readVersions(5); - scanner = table.getScanner(scan); - result = scanner.next(); - // ts[0] has gone from user view. 
Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, - 0, 0); - - // Test with max versions 5, it should still read ts[1] - get = new Get(ROW) - .setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("value-a"))) - .readVersions(5); - result = table.get(get); - // ts[0] has gone from user view. Only read ts[2] which value is less or equal to 3 - assertNResult(result, ROW, FAMILY, QUALIFIER, new long[] { ts[1] }, new byte[][] { VALUEA }, - 0, 0); - } - } - - @Test - public void testCellUtilTypeMethods() throws IOException { - final TableName tableName = name.getTableName(); - try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { - - final byte[] row = Bytes.toBytes("p"); - Put p = new Put(row); - p.addColumn(FAMILY, QUALIFIER, VALUE); - table.put(p); - - try (ResultScanner scanner = table.getScanner(new Scan())) { - Result result = scanner.next(); - assertNotNull(result); - CellScanner cs = result.cellScanner(); - assertTrue(cs.advance()); - Cell c = cs.current(); - assertTrue(CellUtil.isPut(c)); - assertFalse(CellUtil.isDelete(c)); - assertFalse(cs.advance()); - assertNull(scanner.next()); - } - - Delete d = new Delete(row); - d.addColumn(FAMILY, QUALIFIER); - table.delete(d); - - Scan scan = new Scan(); - scan.setRaw(true); - try (ResultScanner scanner = table.getScanner(scan)) { - Result result = scanner.next(); - assertNotNull(result); - CellScanner cs = result.cellScanner(); - assertTrue(cs.advance()); - - // First cell should be the delete (masking the Put) - Cell c = cs.current(); - assertTrue("Cell should be a Delete: " + c, CellUtil.isDelete(c)); - assertFalse("Cell should not be a Put: " + c, CellUtil.isPut(c)); - - // Second cell should be the original Put - assertTrue(cs.advance()); - c = cs.current(); - assertFalse("Cell should not be a Delete: " + c, CellUtil.isDelete(c)); - assertTrue("Cell should be a Put: " + c, CellUtil.isPut(c)); 
- - // No more cells in this row - assertFalse(cs.advance()); - - // No more results in this scan - assertNull(scanner.next()); - } - } - } - - @Test(expected = DoNotRetryIOException.class) - public void testCreateTableWithZeroRegionReplicas() throws Exception { - TableName tableName = name.getTableName(); - TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))) - .setRegionReplication(0).build(); - - TEST_UTIL.getAdmin().createTable(desc); - } - - @Test(expected = DoNotRetryIOException.class) - public void testModifyTableWithZeroRegionReplicas() throws Exception { - TableName tableName = name.getTableName(); - TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build(); - - TEST_UTIL.getAdmin().createTable(desc); - TableDescriptor newDesc = - TableDescriptorBuilder.newBuilder(desc).setRegionReplication(0).build(); - - TEST_UTIL.getAdmin().modifyTable(newDesc); - } - - @Test(timeout = 60000) - public void testModifyTableWithMemstoreData() throws Exception { - TableName tableName = name.getTableName(); - createTableAndValidateTableSchemaModification(tableName, true); - } - - @Test(timeout = 60000) - public void testDeleteCFWithMemstoreData() throws Exception { - TableName tableName = name.getTableName(); - createTableAndValidateTableSchemaModification(tableName, false); - } - - /** - * Create table and validate online schema modification - * @param tableName Table name - * @param modifyTable Modify table if true otherwise delete column family - * @throws IOException in case of failures - */ - private void createTableAndValidateTableSchemaModification(TableName tableName, - boolean modifyTable) throws Exception { - Admin admin = TEST_UTIL.getAdmin(); - // Create table with two Cfs - byte[] cf1 = Bytes.toBytes("cf1"); - byte[] cf2 = Bytes.toBytes("cf2"); - TableDescriptor tableDesc = 
TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf1)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf2)).build(); - admin.createTable(tableDesc); - - Table t = TEST_UTIL.getConnection().getTable(tableName); - // Insert few records and flush the table - t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val1"))); - t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2"))); - admin.flush(tableName); - Path tableDir = CommonFSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), tableName); - List regionDirs = FSUtils.getRegionDirs(TEST_UTIL.getTestFileSystem(), tableDir); - assertEquals(1, regionDirs.size()); - List familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0)); - assertEquals(2, familyDirs.size()); - - // Insert record but dont flush the table - t.put(new Put(ROW).addColumn(cf1, QUALIFIER, Bytes.toBytes("val2"))); - t.put(new Put(ROW).addColumn(cf2, QUALIFIER, Bytes.toBytes("val2"))); - - if (modifyTable) { - tableDesc = TableDescriptorBuilder.newBuilder(tableDesc).removeColumnFamily(cf2).build(); - admin.modifyTable(tableDesc); - } else { - admin.deleteColumnFamily(tableName, cf2); - } - // After table modification or delete family there should be only one CF in FS - familyDirs = FSUtils.getFamilyDirs(TEST_UTIL.getTestFileSystem(), regionDirs.get(0)); - assertEquals("CF dir count should be 1, but was " + familyDirs.size(), 1, familyDirs.size()); + @BeforeAll + public static void setUpBeforeClass() throws Exception { + initialize(NoOpScanPolicyObserver.class, MultiRowMutationEndpoint.class); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5WithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5WithCoprocessor.java new file mode 100644 index 000000000000..b7203a0b5858 --- /dev/null +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5WithCoprocessor.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; + +/** + * Run tests that use the HBase clients; {@link Table}. Sets up the HBase mini cluster once at start + * and runs through all client tests. Each creates a table named for the method and does its stuff + * against that. Parameterized to run with different registry implementations. 
+ */ +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSide5WithCoprocessor extends FromClientSideTest5 { + + public TestFromClientSide5WithCoprocessor(Class registryImpl, + int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } + + @BeforeAll + public static void setUpBeforeClass() throws Exception { + initialize(MultiRowMutationEndpoint.class); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideFilterAcrossMultipleRegions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideFilterAcrossMultipleRegions.java new file mode 100644 index 000000000000..ab7b10801b25 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideFilterAcrossMultipleRegions.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; + +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSideFilterAcrossMultipleRegions + extends FromClientSideTestFilterAcrossMultipleRegions { + + public TestFromClientSideFilterAcrossMultipleRegions( + Class registryImpl, int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } + + @BeforeAll + public static void setUpBeforeClass() throws Exception { + initialize(MultiRowMutationEndpoint.class); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideFilterAcrossMultipleRegionsWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideFilterAcrossMultipleRegionsWithCoprocessor.java new file mode 100644 index 000000000000..b85399850d28 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideFilterAcrossMultipleRegionsWithCoprocessor.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; + +@Tag(LargeTests.TAG) +@Tag(ClientTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSideFilterAcrossMultipleRegionsWithCoprocessor + extends FromClientSideTestFilterAcrossMultipleRegions { + + public TestFromClientSideFilterAcrossMultipleRegionsWithCoprocessor( + Class registryImpl, int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } + + @BeforeAll + public static void setUpBeforeClass() throws Exception { + initialize(NoOpScanPolicyObserver.class, MultiRowMutationEndpoint.class); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java index 85d06293e111..0a3c390f367f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java @@ -17,64 +17,53 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertTrue; +import 
static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNameTestExtension; import org.apache.hadoop.hbase.ipc.AbstractRpcClient; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; /** * Do some ops and prove that client and server can work w/o codecs; that we can pb all the time. * Good for third-party clients or simple scripts that want to talk direct to hbase. 
*/ -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestFromClientSideNoCodec { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSideNoCodec.class); + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); - protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + @RegisterExtension + private TableNameTestExtension name = new TableNameTestExtension(); - @Rule - public TestName name = new TestName(); - - /** - * @throws java.lang.Exception - */ - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { // Turn off codec use TEST_UTIL.getConfiguration().set("hbase.client.default.rpc.codec", ""); TEST_UTIL.startMiniCluster(1); } - /** - * @throws java.lang.Exception - */ - @AfterClass + @AfterAll public static void tearDownAfterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @Test public void testBasics() throws IOException { - final TableName tableName = TableName.valueOf(name.getMethodName()); + final TableName tableName = name.getTableName(); final byte[][] fs = new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; Table ht = TEST_UTIL.createTable(tableName, fs); @@ -90,15 +79,15 @@ public void testBasics() throws IOException { for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); byte[] f = fs[i++]; - assertTrue(Bytes.toString(f), Bytes.equals(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength(), f, 0, f.length)); + assertTrue(Bytes.equals(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), f, + 0, f.length), Bytes.toString(f)); } // Check getRowOrBefore byte[] f = fs[0]; Get get = new Get(row); get.addFamily(f); r = ht.get(get); - assertTrue(r.toString(), r.containsColumn(f, f)); + assertTrue(r.containsColumn(f, f), r.toString()); // Check scan. 
ResultScanner scanner = ht.getScanner(new Scan()); int count = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java index 2c8f4d201870..1c2c1087d37f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java @@ -17,227 +17,17 @@ */ package org.apache.hadoop.hbase.client; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.NavigableSet; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.ExtendedCell; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.exceptions.ScannerResetException; -import org.apache.hadoop.hbase.regionserver.DelegatingKeyValueScanner; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.KeyValueScanner; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.regionserver.ReversedStoreScanner; -import org.apache.hadoop.hbase.regionserver.ScanInfo; -import org.apache.hadoop.hbase.regionserver.StoreScanner; 
import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.wal.WAL; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -@Category({ MediumTests.class, ClientTests.class }) -public class TestFromClientSideScanExcpetion { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSideScanExcpetion.class); - - protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; - private static byte[] FAMILY = Bytes.toBytes("testFamily"); +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) +public class TestFromClientSideScanExcpetion extends FromClientSideScanExcpetionTestBase { - private static int SLAVES = 3; - - @Rule - public TestName name = new TestName(); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); - conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3); - conf.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 6000000); - conf.setClass(HConstants.REGION_IMPL, MyHRegion.class, HRegion.class); - conf.setBoolean("hbase.client.log.scanner.activity", true); - // We need more than one region server in this test - TEST_UTIL.startMiniCluster(SLAVES); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - private static AtomicBoolean ON = new AtomicBoolean(false); - private static AtomicLong REQ_COUNT = new AtomicLong(0); - private static AtomicBoolean IS_DO_NOT_RETRY = new AtomicBoolean(false); // whether to throw - // DNRIOE - private static AtomicBoolean 
THROW_ONCE = new AtomicBoolean(true); // whether to only throw once - - private static void reset() { - ON.set(false); - REQ_COUNT.set(0); - IS_DO_NOT_RETRY.set(false); - THROW_ONCE.set(true); - } - - private static void inject() { - ON.set(true); - } - - public static final class MyHRegion extends HRegion { - - @SuppressWarnings("deprecation") - public MyHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration confParam, - RegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices) { - super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices); - } - - @Override - protected HStore instantiateHStore(ColumnFamilyDescriptor family, boolean warmup) - throws IOException { - return new MyHStore(this, family, conf, warmup); - } - } - - public static final class MyHStore extends HStore { - - public MyHStore(HRegion region, ColumnFamilyDescriptor family, Configuration confParam, - boolean warmup) throws IOException { - super(region, family, confParam, warmup); - } - - @Override - protected KeyValueScanner createScanner(Scan scan, ScanInfo scanInfo, - NavigableSet targetCols, long readPt) throws IOException { - return scan.isReversed() - ? 
new ReversedStoreScanner(this, scanInfo, scan, targetCols, readPt) - : new MyStoreScanner(this, scanInfo, scan, targetCols, readPt); - } - } - - public static final class MyStoreScanner extends StoreScanner { - public MyStoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet columns, - long readPt) throws IOException { - super(store, scanInfo, scan, columns, readPt); - } - - @Override - protected List selectScannersFrom(HStore store, - List allScanners) { - List scanners = super.selectScannersFrom(store, allScanners); - List newScanners = new ArrayList<>(scanners.size()); - for (KeyValueScanner scanner : scanners) { - newScanners.add(new DelegatingKeyValueScanner(scanner) { - @Override - public boolean reseek(ExtendedCell key) throws IOException { - if (ON.get()) { - REQ_COUNT.incrementAndGet(); - if (!THROW_ONCE.get() || REQ_COUNT.get() == 1) { - if (IS_DO_NOT_RETRY.get()) { - throw new DoNotRetryIOException("Injected exception"); - } else { - throw new IOException("Injected exception"); - } - } - } - return super.reseek(key); - } - }); - } - return newScanners; - } - } - - /** - * Tests the case where a Scan can throw an IOException in the middle of the seek / reseek leaving - * the server side RegionScanner to be in dirty state. The client has to ensure that the - * ClientScanner does not get an exception and also sees all the data. 
- */ - @Test - public void testClientScannerIsResetWhenScanThrowsIOException() - throws IOException, InterruptedException { - reset(); - THROW_ONCE.set(true); // throw exceptions only once - TableName tableName = TableName.valueOf(name.getMethodName()); - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - int rowCount = TEST_UTIL.loadTable(t, FAMILY, false); - TEST_UTIL.getAdmin().flush(tableName); - inject(); - int actualRowCount = TEST_UTIL.countRows(t, new Scan().addColumn(FAMILY, FAMILY)); - assertEquals(rowCount, actualRowCount); - } - assertTrue(REQ_COUNT.get() > 0); - } - - /** - * Tests the case where a coprocessor throws a DoNotRetryIOException in the scan. The expectation - * is that the exception will bubble up to the client scanner instead of being retried. - */ - @Test - public void testScannerThrowsExceptionWhenCoprocessorThrowsDNRIOE() - throws IOException, InterruptedException { - reset(); - IS_DO_NOT_RETRY.set(true); - TableName tableName = TableName.valueOf(name.getMethodName()); - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - TEST_UTIL.loadTable(t, FAMILY, false); - TEST_UTIL.getAdmin().flush(tableName); - inject(); - TEST_UTIL.countRows(t, new Scan().addColumn(FAMILY, FAMILY)); - fail("Should have thrown an exception"); - } catch (DoNotRetryIOException expected) { - // expected - } - assertTrue(REQ_COUNT.get() > 0); - } - - /** - * Tests the case where a coprocessor throws a regular IOException in the scan. The expectation is - * that the we will keep on retrying, but fail after the retries are exhausted instead of retrying - * indefinitely. 
- */ - @Test - public void testScannerFailsAfterRetriesWhenCoprocessorThrowsIOE() - throws IOException, InterruptedException { - TableName tableName = TableName.valueOf(name.getMethodName()); - reset(); - THROW_ONCE.set(false); // throw exceptions in every retry - try (Table t = TEST_UTIL.createTable(tableName, FAMILY)) { - TEST_UTIL.loadTable(t, FAMILY, false); - TEST_UTIL.getAdmin().flush(tableName); - inject(); - TEST_UTIL.countRows(t, new Scan().addColumn(FAMILY, FAMILY)); - fail("Should have thrown an exception"); - } catch (ScannerResetException expected) { - // expected - } catch (RetriesExhaustedException e) { - // expected - assertThat(e.getCause(), instanceOf(ScannerResetException.class)); - } - assertTrue(REQ_COUNT.get() >= 3); + startCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.java index a6df47b73eb2..903a0c5afa6d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetionWithCoprocessor.java @@ -18,33 +18,28 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; /** * Test all client operations with a coprocessor 
that just implements the default flush/compact/scan * policy. */ -@Category({ MediumTests.class, ClientTests.class }) +@Tag(MediumTests.TAG) +@Tag(ClientTests.TAG) public class TestFromClientSideScanExcpetionWithCoprocessor - extends TestFromClientSideScanExcpetion { + extends FromClientSideScanExcpetionTestBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSideScanExcpetionWithCoprocessor.class); - - @BeforeClass + @BeforeAll public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName()); - TestFromClientSideScanExcpetion.setUpBeforeClass(); + startCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java deleted file mode 100644 index 6f84556c41aa..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.util.Arrays; -import java.util.Collection; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; -import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.AfterClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; -import org.junit.runners.Parameterized; - -/** - * Test all client operations with a coprocessor that just implements the default flush/compact/scan - * policy. - *

- * Base class was split into three so this class got split into three. See below for other parts. - * @see TestFromClientSide4 - * @see TestFromClientSide5 - */ -@Category({ LargeTests.class, ClientTests.class }) -public class TestFromClientSideWithCoprocessor extends TestFromClientSide { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSideWithCoprocessor.class); - - // Override the parameters from the parent class. We just want to run it for the default - // param combination. - @Parameterized.Parameters - public static Collection parameters() { - return Arrays - .asList(new Object[][] { { MasterRegistry.class, 1 }, { ZKConnectionRegistry.class, 1 } }); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - afterClass(); - } - - public TestFromClientSideWithCoprocessor(Class registry, int numHedgedReqs) throws Exception { - initialize(registry, numHedgedReqs, NoOpScanPolicyObserver.class, - MultiRowMutationEndpoint.class); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor4.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor4.java deleted file mode 100644 index 6fd0c1904810..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor4.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.util.Arrays; -import java.util.Collection; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; -import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.AfterClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; -import org.junit.runners.Parameterized; - -/** - * Test all client operations with a coprocessor that just implements the default flush/compact/scan - * policy. - *

- * Base class was split into three so this class got split into three. - */ -@Category({ LargeTests.class, ClientTests.class }) -public class TestFromClientSideWithCoprocessor4 extends TestFromClientSide4 { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSideWithCoprocessor4.class); - - // Override the parameters from the parent class. We just want to run it for the default - // param combination. - @Parameterized.Parameters - public static Collection parameters() { - return Arrays - .asList(new Object[][] { { MasterRegistry.class, 1 }, { ZKConnectionRegistry.class, 1 } }); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - afterClass(); - } - - public TestFromClientSideWithCoprocessor4(Class registry, int numHedgedReqs) throws Exception { - initialize(registry, numHedgedReqs, NoOpScanPolicyObserver.class, - MultiRowMutationEndpoint.class); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor5.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor5.java deleted file mode 100644 index 759360f4b5e3..000000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor5.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.util.Arrays; -import java.util.List; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; -import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.junit.AfterClass; -import org.junit.ClassRule; -import org.junit.experimental.categories.Category; -import org.junit.runners.Parameterized.Parameters; - -/** - * Test all client operations with a coprocessor that just implements the default flush/compact/scan - * policy. - */ -@Category({ LargeTests.class, ClientTests.class }) -public class TestFromClientSideWithCoprocessor5 extends TestFromClientSide5 { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFromClientSideWithCoprocessor5.class); - - // Override the parameters from the parent class. We just want to run it for the default - // param combination. 
- @Parameters(name = "{index}: registry={0}, numHedgedReqs={1}") - public static List parameters() { - return Arrays.asList(new Object[] { MasterRegistry.class, 1 }, - new Object[] { ZKConnectionRegistry.class, 1 }); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - afterClass(); - } - - public TestFromClientSideWithCoprocessor5(Class registry, - int numHedgedReqs) throws Exception { - initialize(registry, numHedgedReqs, NoOpScanPolicyObserver.class, - MultiRowMutationEndpoint.class); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java index be70c4c5cd64..1e8cdb2f3203 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java @@ -531,7 +531,7 @@ public void testIncrementWithCustomTimestamp() throws IOException { */ static void assertIncrementKey(Cell key, byte[] row, byte[] family, byte[] qualifier, long value) throws Exception { - TestFromClientSide.assertIncrementKey(key, row, family, qualifier, value); + FromClientSideTestBase.assertIncrementKey(key, row, family, qualifier, value); } public static String filterStringSoTableNameSafe(final String str) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index 99439a482297..9b623d9ada29 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.RPC_CODEC_CONF_KEY; -import static 
org.apache.hadoop.hbase.client.FromClientSide3TestBase.generateHugeValue; +import static org.apache.hadoop.hbase.client.FromClientSideTest3.generateHugeValue; import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_CODEC_CLASS; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableScanMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableScanMetrics.java index 607bfc228239..e35fa617cecd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableScanMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableScanMetrics.java @@ -24,7 +24,12 @@ import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME; import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.RPC_SCAN_PROCESSING_TIME_METRIC_NAME; import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.RPC_SCAN_QUEUE_WAIT_TIME_METRIC_NAME; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.Arrays; @@ -40,8 +45,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.HBaseTestingUtil; import 
org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -53,20 +59,16 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FutureUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.runners.Parameterized.Parameter; -import org.junit.runners.Parameterized.Parameters; - -@Category({ ClientTests.class, LargeTests.class }) -public class TestTableScanMetrics extends FromClientSideBase { - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableScanMetrics.class); +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.params.provider.Arguments; + +@Tag(ClientTests.TAG) +@Tag(LargeTests.TAG) +@HBaseParameterizedTestTemplate(name = "{index}: scanner={0}") +public class TestTableScanMetrics { private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -85,19 +87,18 @@ public class TestTableScanMetrics extends FromClientSideBase { private static Connection CONN; - @Parameters(name = "{index}: scanner={0}") - public static List params() { - return Arrays.asList(new Object[] { "ForwardScanner", new Scan() }, - new Object[] { "ReverseScanner", new Scan().setReversed(true) }); + public static Stream parameters() { + return Stream.of(Arguments.of("ForwardScanner", new Scan()), + Arguments.of("ReverseScanner", new Scan().setReversed(true))); } - @Parameter(0) - public String scannerName; + private Scan originalScan; - @Parameter(1) - public Scan originalScan; + public TestTableScanMetrics(String scannerName, Scan originalScan) { + this.originalScan = originalScan; + } - @BeforeClass + @BeforeAll public 
static void setUp() throws Exception { // Start the minicluster TEST_UTIL.startMiniCluster(2); @@ -112,7 +113,7 @@ public static void setUp() throws Exception { NUM_REGIONS = TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME).size(); } - @AfterClass + @AfterAll public static void tearDown() throws Exception { TEST_UTIL.shutdownMiniCluster(); } @@ -135,150 +136,147 @@ private ScanMetrics assertScannedRowsAndGetScanMetrics(Scan scan, int expectedCo ScanMetrics scanMetrics; try (Table table = CONN.getTable(TABLE_NAME); ResultScanner scanner = table.getScanner(scan)) { for (Result result : scanner) { - Assert.assertFalse(result.isEmpty()); + assertFalse(result.isEmpty()); countOfRows++; } scanMetrics = scanner.getScanMetrics(); } - Assert.assertEquals(expectedCount, countOfRows); + assertEquals(expectedCount, countOfRows); return scanMetrics; } - @Test + @TestTemplate public void testScanMetricsDisabled() throws Exception { Scan scan = generateScan(Bytes.toBytes("xxx1"), Bytes.toBytes("zzz1")); ScanMetrics scanMetrics = assertScannedRowsAndGetScanMetrics(scan, 3); - Assert.assertNull(scanMetrics); + assertNull(scanMetrics); } - @Test + @TestTemplate public void testScanMetricsWithScanMetricByRegionDisabled() throws Exception { Scan scan = generateScan(Bytes.toBytes("xxx1"), Bytes.toBytes("zzz1")); scan.setScanMetricsEnabled(true); int expectedRowsScanned = 3; ScanMetrics scanMetrics = assertScannedRowsAndGetScanMetrics(scan, expectedRowsScanned); - Assert.assertNotNull(scanMetrics); + assertNotNull(scanMetrics); Map metricsMap = scanMetrics.getMetricsMap(false); // The test setup is such that we have 1 row per region in the scan range - Assert.assertEquals(expectedRowsScanned, scanMetrics.countOfRegions.get()); - Assert.assertEquals(expectedRowsScanned, - (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); - Assert.assertTrue(scanMetrics.collectMetricsByRegion().isEmpty()); + assertEquals(expectedRowsScanned, scanMetrics.countOfRegions.get()); + 
assertEquals(expectedRowsScanned, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); + assertTrue(scanMetrics.collectMetricsByRegion().isEmpty()); } - @Test + @TestTemplate public void testScanMetricsResetWithScanMetricsByRegionDisabled() throws Exception { Scan scan = generateScan(Bytes.toBytes("xxx1"), Bytes.toBytes("zzz1")); scan.setScanMetricsEnabled(true); int expectedRowsScanned = 3; ScanMetrics scanMetrics = assertScannedRowsAndGetScanMetrics(scan, expectedRowsScanned); - Assert.assertNotNull(scanMetrics); + assertNotNull(scanMetrics); // By default counters are collected with reset as true Map metricsMap = scanMetrics.getMetricsMap(); - Assert.assertEquals(expectedRowsScanned, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); - Assert.assertEquals(expectedRowsScanned, - (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); + assertEquals(expectedRowsScanned, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertEquals(expectedRowsScanned, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); // Subsequent call to get scan metrics map should show all counters as 0 - Assert.assertEquals(0, scanMetrics.countOfRegions.get()); - Assert.assertEquals(0, scanMetrics.countOfRowsScanned.get()); + assertEquals(0, scanMetrics.countOfRegions.get()); + assertEquals(0, scanMetrics.countOfRowsScanned.get()); } - @Test + @TestTemplate public void testScanMetricsByRegionForSingleRegionScan() throws Exception { Scan scan = generateScan(Bytes.toBytes("xxx1"), Bytes.toBytes("xxx1")); scan.setEnableScanMetricsByRegion(true); int expectedRowsScanned = 1; ScanMetrics scanMetrics = assertScannedRowsAndGetScanMetrics(scan, expectedRowsScanned); - Assert.assertNotNull(scanMetrics); + assertNotNull(scanMetrics); Map metricsMap = scanMetrics.getMetricsMap(false); - Assert.assertEquals(expectedRowsScanned, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); - Assert.assertEquals(expectedRowsScanned, - (long) 
metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); + assertEquals(expectedRowsScanned, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertEquals(expectedRowsScanned, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); Map> scanMetricsByRegion = scanMetrics.collectMetricsByRegion(false); - Assert.assertEquals(expectedRowsScanned, scanMetricsByRegion.size()); + assertEquals(expectedRowsScanned, scanMetricsByRegion.size()); for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); metricsMap = entry.getValue(); - Assert.assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); - Assert.assertNotNull(scanMetricsRegionInfo.getServerName()); + assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); + assertNotNull(scanMetricsRegionInfo.getServerName()); // As we are scanning single row so, overall scan metrics will match per region scan metrics - Assert.assertEquals(expectedRowsScanned, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); - Assert.assertEquals(expectedRowsScanned, + assertEquals(expectedRowsScanned, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertEquals(expectedRowsScanned, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); } } - @Test + @TestTemplate public void testScanMetricsByRegionForMultiRegionScan() throws Exception { Scan scan = generateScan(EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY); scan.setEnableScanMetricsByRegion(true); int expectedRowsScanned = 3; ScanMetrics scanMetrics = assertScannedRowsAndGetScanMetrics(scan, expectedRowsScanned); - Assert.assertNotNull(scanMetrics); - Assert.assertEquals(NUM_REGIONS, scanMetrics.countOfRegions.get()); - Assert.assertEquals(expectedRowsScanned, scanMetrics.countOfRowsScanned.get()); + assertNotNull(scanMetrics); + assertEquals(NUM_REGIONS, scanMetrics.countOfRegions.get()); + assertEquals(expectedRowsScanned, scanMetrics.countOfRowsScanned.get()); Map> scanMetricsByRegion = 
scanMetrics.collectMetricsByRegion(false); - Assert.assertEquals(NUM_REGIONS, scanMetricsByRegion.size()); + assertEquals(NUM_REGIONS, scanMetricsByRegion.size()); int rowsScannedAcrossAllRegions = 0; for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); Map metricsMap = entry.getValue(); - Assert.assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); - Assert.assertNotNull(scanMetricsRegionInfo.getServerName()); - Assert.assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); + assertNotNull(scanMetricsRegionInfo.getServerName()); + assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); if (metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME) == 1) { rowsScannedAcrossAllRegions++; } else { assertEquals(0, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); } } - Assert.assertEquals(expectedRowsScanned, rowsScannedAcrossAllRegions); + assertEquals(expectedRowsScanned, rowsScannedAcrossAllRegions); } - @Test + @TestTemplate public void testScanMetricsByRegionReset() throws Exception { Scan scan = generateScan(Bytes.toBytes("xxx1"), Bytes.toBytes("zzz1")); scan.setEnableScanMetricsByRegion(true); int expectedRowsScanned = 3; ScanMetrics scanMetrics = assertScannedRowsAndGetScanMetrics(scan, expectedRowsScanned); - Assert.assertNotNull(scanMetrics); + assertNotNull(scanMetrics); // Retrieve scan metrics by region as a map and reset Map> scanMetricsByRegion = scanMetrics.collectMetricsByRegion(); // We scan 1 row per region - Assert.assertEquals(expectedRowsScanned, scanMetricsByRegion.size()); + assertEquals(expectedRowsScanned, scanMetricsByRegion.size()); for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); Map metricsMap = entry.getValue(); - Assert.assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); - 
Assert.assertNotNull(scanMetricsRegionInfo.getServerName()); - Assert.assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); - Assert.assertEquals(1, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); + assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); + assertNotNull(scanMetricsRegionInfo.getServerName()); + assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertEquals(1, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); } // Scan metrics have already been reset and now all counters should be 0 scanMetricsByRegion = scanMetrics.collectMetricsByRegion(false); // Size of map should be same as earlier - Assert.assertEquals(expectedRowsScanned, scanMetricsByRegion.size()); + assertEquals(expectedRowsScanned, scanMetricsByRegion.size()); for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); Map metricsMap = entry.getValue(); - Assert.assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); - Assert.assertNotNull(scanMetricsRegionInfo.getServerName()); + assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); + assertNotNull(scanMetricsRegionInfo.getServerName()); // Counters should have been reset to 0 - Assert.assertEquals(0, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); - Assert.assertEquals(0, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); + assertEquals(0, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertEquals(0, (long) metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME)); } } - @Test + @TestTemplate public void testConcurrentUpdatesAndResetOfScanMetricsByRegion() throws Exception { ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(2); TableName tableName = TableName.valueOf(TestTableScanMetrics.class.getSimpleName() @@ -300,7 +298,7 @@ public void testConcurrentUpdatesAndResetOfScanMetricsByRegion() throws Exceptio Runnable tableScanner = new 
Runnable() { public void run() { for (Result r : rs) { - Assert.assertFalse(r.isEmpty()); + assertFalse(r.isEmpty()); rowsScanned.incrementAndGet(); } latch.countDown(); @@ -314,7 +312,7 @@ public void run() { // Merge leftover scan metrics mergeScanMetricsByRegion(scanMetrics.collectMetricsByRegion(), concurrentScanMetricsByRegion); - Assert.assertEquals(HBaseTestingUtil.ROWS.length, rowsScanned.get()); + assertEquals(HBaseTestingUtil.ROWS.length, rowsScanned.get()); } Map> expectedScanMetricsByRegion; @@ -328,10 +326,10 @@ public void run() { ScanMetrics scanMetrics = rs.getScanMetrics(); int rowsScanned = 0; for (Result r : rs) { - Assert.assertFalse(r.isEmpty()); + assertFalse(r.isEmpty()); rowsScanned++; } - Assert.assertEquals(HBaseTestingUtil.ROWS.length, rowsScanned); + assertEquals(HBaseTestingUtil.ROWS.length, rowsScanned); expectedScanMetricsByRegion = scanMetrics.collectMetricsByRegion(); for (Map.Entry> entry : expectedScanMetricsByRegion .entrySet()) { @@ -341,24 +339,23 @@ public void run() { metricsMap.remove(MILLIS_BETWEEN_NEXTS_METRIC_NAME); metricsMap.remove(RPC_SCAN_PROCESSING_TIME_METRIC_NAME); metricsMap.remove(RPC_SCAN_QUEUE_WAIT_TIME_METRIC_NAME); - Assert.assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); - Assert.assertNotNull(scanMetricsRegionInfo.getServerName()); - Assert.assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertNotNull(scanMetricsRegionInfo.getEncodedRegionName()); + assertNotNull(scanMetricsRegionInfo.getServerName()); + assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); // Each region will have 26 * 26 + 26 + 1 rows except last region which will have 1 row long rowsScannedFromMetrics = metricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME); - Assert.assertTrue( - rowsScannedFromMetrics == 1 || rowsScannedFromMetrics == (26 * 26 + 26 + 1)); + assertTrue(rowsScannedFromMetrics == 1 || rowsScannedFromMetrics == (26 * 26 + 26 + 1)); } } // Assert on scan metrics by region - 
Assert.assertEquals(expectedScanMetricsByRegion, concurrentScanMetricsByRegion); + assertEquals(expectedScanMetricsByRegion, concurrentScanMetricsByRegion); } finally { TEST_UTIL.deleteTable(tableName); } } - @Test + @TestTemplate public void testRPCCallProcessingAndQueueWaitTimeMetrics() throws Exception { final int numThreads = 20; Configuration conf = TEST_UTIL.getConfiguration(); @@ -367,7 +364,7 @@ public void testRPCCallProcessingAndQueueWaitTimeMetrics() throws Exception { HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); // Keep the number of threads to be high enough for RPC calls to queue up. For now going with 6 // times the handler count. - Assert.assertTrue(numThreads > 6 * handlerCount); + assertTrue(numThreads > 6 * handlerCount); ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(numThreads); TableName tableName = TableName.valueOf( TestTableScanMetrics.class.getSimpleName() + "_testRPCCallProcessingAndQueueWaitTimeMetrics"); @@ -387,7 +384,7 @@ public void run() { try (ResultScanner rs = table.getScanner(scan)) { Result r; while ((r = rs.next()) != null) { - Assert.assertFalse(r.isEmpty()); + assertFalse(r.isEmpty()); } ScanMetrics scanMetrics = rs.getScanMetrics(); Map metricsMap = scanMetrics.getMetricsMap(); @@ -404,14 +401,14 @@ public void run() { latch.await(); executor.shutdown(); executor.awaitTermination(10, TimeUnit.SECONDS); - Assert.assertTrue(totalScanRpcTime.get() > 0); - Assert.assertTrue(totalQueueWaitTime.get() > 0); + assertTrue(totalScanRpcTime.get() > 0); + assertTrue(totalQueueWaitTime.get() > 0); } finally { TEST_UTIL.deleteTable(tableName); } } - @Test + @TestTemplate public void testScanMetricsByRegionWithRegionMove() throws Exception { TableName tableName = TableName.valueOf( TestTableScanMetrics.class.getSimpleName() + "testScanMetricsByRegionWithRegionMove"); @@ -446,7 +443,7 @@ public void testScanMetricsByRegionWithRegionMove() throws Exception { isFirstScanOfRegion = false; } } - 
Assert.assertNotNull(movedRegion); + assertNotNull(movedRegion); scanMetrics = rs.getScanMetrics(); Map> scanMetricsByRegion = @@ -455,7 +452,7 @@ public void testScanMetricsByRegionWithRegionMove() throws Exception { Set serversForMovedRegion = new HashSet<>(); // 2 regions scanned with two entries for first region as it moved in b/w scan - Assert.assertEquals(3, scanMetricsByRegion.size()); + assertEquals(3, scanMetricsByRegion.size()); for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); @@ -465,20 +462,20 @@ public void testScanMetricsByRegionWithRegionMove() throws Exception { actualCountOfRowsScannedInMovedRegion += rowsScanned; serversForMovedRegion.add(scanMetricsRegionInfo.getServerName()); - Assert.assertEquals(1, (long) metricsMap.get(RPC_RETRIES_METRIC_NAME)); + assertEquals(1, (long) metricsMap.get(RPC_RETRIES_METRIC_NAME)); } - Assert.assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); } - Assert.assertEquals(expectedCountOfRowsScannedInMovedRegion, + assertEquals(expectedCountOfRowsScannedInMovedRegion, actualCountOfRowsScannedInMovedRegion); - Assert.assertEquals(2, serversForMovedRegion.size()); + assertEquals(2, serversForMovedRegion.size()); } } finally { TEST_UTIL.deleteTable(tableName); } } - @Test + @TestTemplate public void testScanMetricsByRegionWithRegionSplit() throws Exception { TableName tableName = TableName.valueOf( TestTableScanMetrics.class.getSimpleName() + "testScanMetricsByRegionWithRegionSplit"); @@ -506,7 +503,7 @@ public void testScanMetricsByRegionWithRegionSplit() throws Exception { try (ResultScanner rs = table.getScanner(scan)) { boolean isFirstScanOfRegion = true; - for (Result r : rs) { + while (rs.next() != null) { if (isFirstScanOfRegion) { splitRegion(tableName, bbb, bmw) .forEach(region -> expectedSplitRegionRes.add(Bytes.toString(region))); @@ -523,7 +520,7 @@ 
public void testScanMetricsByRegionWithRegionSplit() throws Exception { Set splitRegionRes = new HashSet<>(); // 1 entry each for parent and two child regions - Assert.assertEquals(3, scanMetricsByRegion.size()); + assertEquals(3, scanMetricsByRegion.size()); for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); @@ -536,18 +533,18 @@ public void testScanMetricsByRegionWithRegionSplit() throws Exception { rpcRetiesCount++; } - Assert.assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); } - Assert.assertEquals(expectedCountOfRowsScannedInRegion, actualCountOfRowsScannedInRegion); - Assert.assertEquals(2, rpcRetiesCount); - Assert.assertEquals(expectedSplitRegionRes, splitRegionRes); + assertEquals(expectedCountOfRowsScannedInRegion, actualCountOfRowsScannedInRegion); + assertEquals(2, rpcRetiesCount); + assertEquals(expectedSplitRegionRes, splitRegionRes); } } finally { TEST_UTIL.deleteTable(tableName); } } - @Test + @TestTemplate public void testScanMetricsByRegionWithRegionMerge() throws Exception { TableName tableName = TableName.valueOf( TestTableScanMetrics.class.getSimpleName() + "testScanMetricsByRegionWithRegionMerge"); @@ -576,7 +573,7 @@ public void testScanMetricsByRegionWithRegionMerge() throws Exception { try (ResultScanner rs = table.getScanner(scan)) { boolean isFirstScanOfRegion = true; - for (Result r : rs) { + while (rs.next() != null) { if (isFirstScanOfRegion) { List out = mergeRegions(tableName, bbb, ccc); // Entry with index 2 is the encoded region name of merged region @@ -594,7 +591,7 @@ public void testScanMetricsByRegionWithRegionMerge() throws Exception { boolean containsMergedRegionInScanMetrics = false; // 1 entry each for old region from which first row was scanned and new merged region - Assert.assertEquals(2, scanMetricsByRegion.size()); + assertEquals(2, scanMetricsByRegion.size()); 
for (Map.Entry> entry : scanMetricsByRegion .entrySet()) { ScanMetricsRegionInfo scanMetricsRegionInfo = entry.getKey(); @@ -606,12 +603,12 @@ public void testScanMetricsByRegionWithRegionMerge() throws Exception { containsMergedRegionInScanMetrics = true; } - Assert.assertEquals(1, (long) metricsMap.get(RPC_RETRIES_METRIC_NAME)); - Assert.assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); + assertEquals(1, (long) metricsMap.get(RPC_RETRIES_METRIC_NAME)); + assertEquals(1, (long) metricsMap.get(REGIONS_SCANNED_METRIC_NAME)); } - Assert.assertEquals(expectedCountOfRowsScannedInRegions, actualCountOfRowsScannedInRegions); - Assert.assertTrue(expectedMergeRegionsRes.containsAll(mergeRegionsRes)); - Assert.assertTrue(containsMergedRegionInScanMetrics); + assertEquals(expectedCountOfRowsScannedInRegions, actualCountOfRowsScannedInRegions); + assertTrue(expectedMergeRegionsRes.containsAll(mergeRegionsRes)); + assertTrue(containsMergedRegionInScanMetrics); } } finally { TEST_UTIL.deleteTable(tableName); @@ -679,7 +676,7 @@ private byte[] moveRegion(TableName tableName, byte[] startRow) throws IOExcepti ServerName finalServerName = regionLocator.getRegionLocation(startRow, true).getServerName(); // Assert that region actually moved - Assert.assertNotEquals(initialServerName, finalServerName); + assertNotEquals(initialServerName, finalServerName); return encodedRegionName; } @@ -703,8 +700,8 @@ private List splitRegion(TableName tableName, byte[] startRow, byte[] sp ServerName initialBottomServerName = bottomLoc.getServerName(); // Assert region is ready for split - Assert.assertEquals(initialTopServerName, initialBottomServerName); - Assert.assertEquals(initialEncodedTopRegionName, initialEncodedBottomRegionName); + assertEquals(initialTopServerName, initialBottomServerName); + assertEquals(initialEncodedTopRegionName, initialEncodedBottomRegionName); FutureUtils.get(admin.splitRegionAsync(initialEncodedTopRegionName, splitKey)); @@ -714,9 +711,9 @@ 
private List splitRegion(TableName tableName, byte[] startRow, byte[] sp byte[] finalEncodedBottomRegionName = bottomLoc.getRegion().getEncodedNameAsBytes(); // Assert that region split is complete - Assert.assertNotEquals(finalEncodedTopRegionName, finalEncodedBottomRegionName); - Assert.assertNotEquals(initialEncodedTopRegionName, finalEncodedBottomRegionName); - Assert.assertNotEquals(initialEncodedBottomRegionName, finalEncodedTopRegionName); + assertNotEquals(finalEncodedTopRegionName, finalEncodedBottomRegionName); + assertNotEquals(initialEncodedTopRegionName, finalEncodedBottomRegionName); + assertNotEquals(initialEncodedBottomRegionName, finalEncodedTopRegionName); return Arrays.asList(initialEncodedTopRegionName, finalEncodedTopRegionName, finalEncodedBottomRegionName); @@ -743,8 +740,8 @@ private List mergeRegions(TableName tableName, byte[] topRegion, byte[] String initialBottomRegionStartKey = Bytes.toString(bottomLoc.getRegion().getStartKey()); // Assert that regions are ready to be merged - Assert.assertNotEquals(initialEncodedTopRegionName, initialEncodedBottomRegionName); - Assert.assertEquals(initialBottomRegionStartKey, initialTopRegionEndKey); + assertNotEquals(initialEncodedTopRegionName, initialEncodedBottomRegionName); + assertEquals(initialBottomRegionStartKey, initialTopRegionEndKey); FutureUtils.get(admin.mergeRegionsAsync( new byte[][] { initialEncodedTopRegionName, initialEncodedBottomRegionName }, false)); @@ -755,9 +752,9 @@ private List mergeRegions(TableName tableName, byte[] topRegion, byte[] byte[] finalEncodedBottomRegionName = bottomLoc.getRegion().getEncodedNameAsBytes(); // Assert regions have been merges successfully - Assert.assertEquals(finalEncodedTopRegionName, finalEncodedBottomRegionName); - Assert.assertNotEquals(initialEncodedTopRegionName, finalEncodedTopRegionName); - Assert.assertNotEquals(initialEncodedBottomRegionName, finalEncodedTopRegionName); + assertEquals(finalEncodedTopRegionName, 
finalEncodedBottomRegionName); + assertNotEquals(initialEncodedTopRegionName, finalEncodedTopRegionName); + assertNotEquals(initialEncodedBottomRegionName, finalEncodedTopRegionName); return Arrays.asList(initialEncodedTopRegionName, initialEncodedBottomRegionName, finalEncodedTopRegionName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java index d32616d70bd0..cdcef8bb5096 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPostIncrementAndAppendBeforeWAL.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -import org.apache.hadoop.hbase.client.TestFromClientSide; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.security.access.AccessController; import org.apache.hadoop.hbase.security.access.Permission; @@ -77,7 +76,8 @@ public class TestPostIncrementAndAppendBeforeWAL { private String currentTestName; - private static final Logger LOG = LoggerFactory.getLogger(TestFromClientSide.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestPostIncrementAndAppendBeforeWAL.class); private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java index 7e886d4a9117..7ad6946bcf6e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.Optional; -import org.apache.hadoop.hbase.client.TestFromClientSideWithCoprocessor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; @@ -30,7 +29,7 @@ /** * RegionObserver that just reimplements the default behavior, in order to validate that all the * necessary APIs for this are public This observer is also used in - * {@link TestFromClientSideWithCoprocessor} and {@link TestCompactionWithCoprocessor} to make sure + * {@code TestFromClientSideWithCoprocessor} and {@link TestCompactionWithCoprocessor} to make sure * that a wide range of functionality still behaves as expected. */ public class NoOpScanPolicyObserver implements RegionCoprocessor, RegionObserver { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java index ffec01edafd7..6cd525f80aae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java @@ -20,7 +20,10 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.Mockito.mockStatic; -import org.apache.hadoop.hbase.client.FromClientSide3TestBase; +import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; +import org.apache.hadoop.hbase.client.ConnectionRegistry; +import org.apache.hadoop.hbase.client.FromClientSideTest3; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import 
org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; @@ -30,7 +33,13 @@ @Tag(LargeTests.TAG) @Tag(ClientTests.TAG) -public class TestFromClientSide3WoUnsafe extends FromClientSide3TestBase { +@HBaseParameterizedTestTemplate(name = "{index}: registryImpl={0}, numHedgedReqs={1}") +public class TestFromClientSide3WoUnsafe extends FromClientSideTest3 { + + public TestFromClientSide3WoUnsafe(Class registryImpl, + int numHedgedReqs) { + super(registryImpl, numHedgedReqs); + } @BeforeAll public static void setUpBeforeAll() throws Exception { @@ -40,6 +49,6 @@ public static void setUpBeforeAll() throws Exception { assertFalse(ByteBufferUtils.UNSAFE_AVAIL); assertFalse(ByteBufferUtils.UNSAFE_UNALIGNED); } - startCluster(); + startCluster(MultiRowMutationEndpoint.class); } } From 136a927d4e64015aeaaafc5f36f6598fd8c5a09a Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Fri, 17 Apr 2026 19:13:58 +0800 Subject: [PATCH 2/4] fix compile --- .../org/apache/hadoop/hbase/client/FromClientSideTestBase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestBase.java index 8e199e574fb0..a7f9ce7e6657 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/FromClientSideTestBase.java @@ -58,7 +58,7 @@ import org.slf4j.LoggerFactory; public class FromClientSideTestBase { - private static final Logger LOG = LoggerFactory.getLogger(FromClientSideBase.class); + private static final Logger LOG = LoggerFactory.getLogger(FromClientSideTestBase.class); protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); static byte[] ROW = Bytes.toBytes("testRow"); static byte[] FAMILY = Bytes.toBytes("testFamily"); From b9db40c3ac5055a2a820c7de7dcefac88656fde5 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: 
Fri, 17 Apr 2026 19:33:29 +0800 Subject: [PATCH 3/4] address review comments --- .../apache/hadoop/hbase/client/TestConnectionReconnect.java | 2 ++ .../org/apache/hadoop/hbase/client/TestTableScanMetrics.java | 3 +++ 2 files changed, 5 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionReconnect.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionReconnect.java index 0c4639e44afa..5803b4d3ad5c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionReconnect.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionReconnect.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.TestTemplate; @@ -69,6 +70,7 @@ public static void setUpBeforeAll() throws Exception { UTIL.waitTableAvailable(NAME); } + @AfterAll public static void tearDownAfterAll() throws Exception { UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableScanMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableScanMetrics.java index e35fa617cecd..2f8d0cbe9127 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableScanMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableScanMetrics.java @@ -65,6 +65,8 @@ import org.junit.jupiter.api.TestTemplate; import org.junit.jupiter.params.provider.Arguments; +import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + @Tag(ClientTests.TAG) @Tag(LargeTests.TAG) @HBaseParameterizedTestTemplate(name = "{index}: scanner={0}") @@ -115,6 +117,7 @@ public static void setUp() throws Exception { @AfterAll public static 
void tearDown() throws Exception { + Closeables.close(CONN, true); TEST_UTIL.shutdownMiniCluster(); } From 8db1e7acc52d26a8463f8477fe1cadad6a7a6018 Mon Sep 17 00:00:00 2001 From: Duo Zhang Date: Sat, 18 Apr 2026 14:10:37 +0800 Subject: [PATCH 4/4] reduce parameters for TestFromClientSide3WoUnsafe --- .../hbase/util/TestFromClientSide3WoUnsafe.java | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java index 6cd525f80aae..70b2f3d5f294 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFromClientSide3WoUnsafe.java @@ -20,15 +20,18 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.Mockito.mockStatic; +import java.util.stream.Stream; import org.apache.hadoop.hbase.HBaseParameterizedTestTemplate; import org.apache.hadoop.hbase.client.ConnectionRegistry; import org.apache.hadoop.hbase.client.FromClientSideTest3; +import org.apache.hadoop.hbase.client.RpcConnectionRegistry; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.unsafe.HBasePlatformDependent; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Tag; +import org.junit.jupiter.params.provider.Arguments; import org.mockito.MockedStatic; @Tag(LargeTests.TAG) @@ -51,4 +54,13 @@ public static void setUpBeforeAll() throws Exception { } startCluster(MultiRowMutationEndpoint.class); } + + // Override the parameters in parent class as we will find the parameters method from the current + // class first in HBaseParameterizedTemplateProvider. 
+ // Tests will run much slower without Unsafe, and since this test is just to confirm that our code + // is still OK without Unsafe, and ZKConnectionRegistry does not use our Unsafe classes, so just + // run with RpcConnectionRegistry to speed up the tests. + public static Stream parameters() { + return Stream.of(Arguments.of(RpcConnectionRegistry.class, 2)); + } }