This article collects Java code examples for the org.apache.hadoop.hbase.HBaseTestingUtility.loadTable() method and shows how HBaseTestingUtility.loadTable() is used in practice. The examples are taken from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should be useful as references. Details of the HBaseTestingUtility.loadTable() method:
Package: org.apache.hadoop.hbase
Class: HBaseTestingUtility
Method: loadTable
Description: Load table with rows from 'aaa' to 'zzz'.
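Before the collected excerpts, here is a minimal sketch of how loadTable() is typically combined with HBaseTestingUtility's mini cluster. The class, table, and family names below are hypothetical and chosen only for illustration:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadTableSketch {
  // Hypothetical names used only for this sketch.
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final TableName TABLE_NAME = TableName.valueOf("demo");
  private static final byte[] FAMILY = Bytes.toBytes("f");

  public static void main(String[] args) throws Exception {
    TEST_UTIL.startMiniCluster();                    // start an in-process HBase cluster
    try (Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILY)) {
      // Writes one cell per row for row keys 'aaa' through 'zzz' and returns the row count.
      int rows = TEST_UTIL.loadTable(table, FAMILY);
      System.out.println("rows loaded: " + rows);
    } finally {
      TEST_UTIL.shutdownMiniCluster();               // always tear the mini cluster down
    }
  }
}

The excerpts that follow show the method's own overloads first, then call sites from the apache/hbase test suite.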
Code example source: apache/hbase
/**
 * Load table of multiple column families with rows from 'aaa' to 'zzz'.
 * @param t Table
 * @param f Array of Families to load
 * @return Count of rows loaded.
 * @throws IOException
 */
public int loadTable(final Table t, final byte[][] f) throws IOException {
  return loadTable(t, f, null);
}
Code example source: apache/hbase
/**
 * Load table of multiple column families with rows from 'aaa' to 'zzz'.
 * @param t Table
 * @param f Array of Families to load
 * @param value the values of the cells. If null is passed, the row key is used as value
 * @return Count of rows loaded.
 * @throws IOException
 */
public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
  return loadTable(t, f, value, true);
}
Code example source: apache/hbase
/**
 * Load table with rows from 'aaa' to 'zzz'.
 * @param t Table
 * @param f Family
 * @return Count of rows loaded.
 * @throws IOException
 */
public int loadTable(final Table t, final byte[] f) throws IOException {
  return loadTable(t, new byte[][] {f});
}
Code example source: apache/hbase
/**
 * Load table with rows from 'aaa' to 'zzz'.
 * @param t Table
 * @param f Family
 * @return Count of rows loaded.
 * @throws IOException
 */
public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
  return loadTable(t, new byte[][] {f}, null, writeToWAL);
}
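The overloads above all funnel into a single loadTable(Table, byte[][], byte[], boolean) implementation, as the delegation chain shows. A rough, hypothetical helper (the name loadBothWays and its parameters are assumptions, not part of the excerpts) illustrates choosing between the variants:

// Hypothetical helper, not part of HBaseTestingUtility; util is assumed to be a running instance.
static int loadBothWays(HBaseTestingUtility util, Table table, byte[] family) throws IOException {
  byte[][] families = new byte[][] { family };
  util.loadTable(table, family, false);                         // single family, skipping the WAL
  return util.loadTable(table, families, Bytes.toBytes("v"));   // byte[][] form, fixed cell value instead of the row key
}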
Code example source: apache/hbase
public void loadData(final Table table, byte[]... families) throws Exception {
  UTIL.loadTable(originalTable, TEST_FAM);
}
Code example source: apache/hbase
private static void createTables() throws IOException, InterruptedException {
  byte[][] FAMILIES = new byte [][] {Bytes.toBytes("f")};
  for (TableName tableName : tables) {
    Table table =
      UTIL.createTable(tableName, FAMILIES, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    UTIL.waitTableAvailable(tableName);
    UTIL.loadTable(table, FAMILIES[0]);
  }
}
Code example source: apache/hbase
protected void createTableAndSnapshot(TableName tableName, String snapshotName)
    throws IOException {
  byte[] column = Bytes.toBytes("A");
  Table table = TEST_UTIL.createTable(tableName, column, 2);
  TEST_UTIL.loadTable(table, column);
  TEST_UTIL.getAdmin().snapshot(snapshotName, tableName);
}
Code example source: apache/hbase
@Override
protected void createTableAndSnapshot(TableName tableName, String snapshotName)
    throws IOException {
  byte[] column = Bytes.toBytes("A");
  Table table = MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName, column);
  TEST_UTIL.loadTable(table, column);
  TEST_UTIL.getAdmin().snapshot(snapshotName, tableName);
}
Code example source: apache/hbase
@Test
public void testScheduleSCP() throws Exception {
  HRegionServer testRs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
  TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), Bytes.toBytes("family1"),
    true);
  ServerName serverName = testRs.getServerName();
  Hbck hbck = getHbck();
  List<Long> pids =
    hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
  assertTrue(pids.get(0) > 0);
  LOG.info("pid is {}", pids.get(0));
  pids = hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
  assertTrue(pids.get(0) == -1);
  LOG.info("pid is {}", pids.get(0));
}
Code example source: apache/hbase
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // switch TIF to log at DEBUG level
  TEST_UTIL.enableDebug(MultiTableInputFormatBase.class);
  // start mini hbase cluster
  TEST_UTIL.startMiniCluster(3);
  // create and fill table
  for (String tableName : TABLES) {
    try (Table table =
        TEST_UTIL.createMultiRegionTable(TableName.valueOf(tableName),
          INPUT_FAMILY, 4)) {
      TEST_UTIL.loadTable(table, INPUT_FAMILY, false);
    }
  }
}
Code example source: apache/hbase
@BeforeClass
public static void beforeClass() throws Exception {
  // Up the handlers; this test needs more than usual.
  UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  UTIL.startMiniCluster();
  Table table =
    UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY,
      OUTPUT_FAMILY });
  UTIL.loadTable(table, INPUT_FAMILY, false);
  UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME);
}
Code example source: apache/hbase
@BeforeClass
public static void beforeClass() throws Exception {
  UTIL.startMiniCluster();
  Table table =
    UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY,
      OUTPUT_FAMILY });
  UTIL.loadTable(table, INPUT_FAMILY, false);
  UTIL.createTable(TABLE_FOR_NEGATIVE_TESTS, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY });
}
Code example source: apache/hbase
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // test intermittently fails under hadoop2 (2.0.2-alpha) if shortcircuit-read (scr) is on.
  // this turns it off for this test. TODO: Figure out why scr breaks recovery.
  System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
  // switch TIF to log at DEBUG level
  TEST_UTIL.enableDebug(TableInputFormat.class);
  TEST_UTIL.enableDebug(TableInputFormatBase.class);
  // start mini hbase cluster
  TEST_UTIL.startMiniCluster(3);
  // create and fill table
  table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, INPUT_FAMILYS);
  TEST_UTIL.loadTable(table, INPUT_FAMILYS, null, false);
}
Code example source: apache/hbase
@Before
public void setUp() throws Exception {
  Configuration c = TEST_UTIL.getConfiguration();
  c.setBoolean("dfs.support.append", true);
  TEST_UTIL.startMiniCluster(1);
  table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY);
  TEST_UTIL.loadTable(table, FAMILY);
  // setup the hdfssnapshots
  client = new DFSClient(TEST_UTIL.getDFSCluster().getURI(), TEST_UTIL.getConfiguration());
  String fullUrIPath = TEST_UTIL.getDefaultRootDirPath().toString();
  String uriString = TEST_UTIL.getTestFileSystem().getUri().toString();
  baseDir = StringUtils.removeStart(fullUrIPath, uriString);
  client.allowSnapshot(baseDir);
}
Code example source: apache/hbase
@Test
public void testGetWALsToSplit() throws Exception {
  TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE);
  // load table
  TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), FAMILY);
  ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
  List<FileStatus> metaWals = splitWALManager.getWALsToSplit(metaServer, true);
  Assert.assertEquals(1, metaWals.size());
  List<FileStatus> wals = splitWALManager.getWALsToSplit(metaServer, false);
  Assert.assertEquals(1, wals.size());
  ServerName testServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
    .map(rs -> rs.getRegionServer().getServerName()).filter(rs -> rs != metaServer).findAny()
    .get();
  metaWals = splitWALManager.getWALsToSplit(testServer, true);
  Assert.assertEquals(0, metaWals.size());
}
Code example source: apache/hbase
@BeforeClass
public static void beforeClass() throws Exception {
  // Make servers report eagerly. This test is about looking at the cluster status reported.
  // Make it so we don't have to wait around too long to see change.
  UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", MSG_INTERVAL);
  UTIL.startMiniCluster(4);
  admin = UTIL.getAdmin();
  admin.balancerSwitch(false, true);
  byte[] FAMILY = Bytes.toBytes("f");
  for (TableName tableName : tables) {
    Table table = UTIL.createMultiRegionTable(tableName, FAMILY, 16);
    UTIL.waitTableAvailable(tableName);
    UTIL.loadTable(table, FAMILY);
  }
}
Code example source: apache/hbase
@Test
public void testFlushWithTableCompactionDisabled() throws Exception {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.setCompactionEnabled(false);
  TEST_UTIL.createTable(htd, new byte[][] { family }, null);
  // load the table
  for (int i = 0; i < blockingStoreFiles + 1; i++) {
    TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(tableName), family);
    TEST_UTIL.flush(tableName);
  }
  // Make sure that store file number is greater than blockingStoreFiles + 1
  Path tableDir = FSUtils.getTableDir(rootDir, tableName);
  Collection<String> hfiles = SnapshotTestingUtils.listHFileNames(fs, tableDir);
  assert(hfiles.size() > blockingStoreFiles + 1);
}
Code example source: apache/hbase
private Table setupTable(TableName tableName) throws Exception {
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
  // Flush many files, but do not compact immediately
  // Make sure that the region does not split
  builder
    .setMemStoreFlushSize(5000)
    .setRegionSplitPolicyClassName(ConstantSizeRegionSplitPolicy.class.getName())
    .setMaxFileSize(100 * 1024 * 1024)
    .setValue("hbase.hstore.compactionThreshold", "250");
  TableDescriptor td = builder.build();
  byte[] fam = Bytes.toBytes("fam");
  Table table = TEST_UTIL.createTable(td, new byte[][] {fam},
    TEST_UTIL.getConfiguration());
  TEST_UTIL.loadTable(table, fam);
  return table;
}
Code example source: apache/hbase
@Before
public void setup() throws IOException, InterruptedException {
  // Create a table of three families. This will assign a region.
  TEST_UTIL.createTable(TABLENAME, FAMILIES);
  Table t = TEST_UTIL.getConnection().getTable(TABLENAME);
  TEST_UTIL.waitUntilNoRegionsInTransition();
  // Load the table with data for all families
  TEST_UTIL.loadTable(t, FAMILIES);
  TEST_UTIL.flush();
  t.close();
  TEST_UTIL.ensureSomeRegionServersAvailable(2);
}
Code example source: apache/hbase
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  cluster = TEST_UTIL.startMiniCluster(ServerNum);
  table = TEST_UTIL.createTable(tableName, FAMILY, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  TEST_UTIL.waitTableAvailable(tableName, 1000);
  TEST_UTIL.loadTable(table, FAMILY);
  for (int i = 0; i < ServerNum; i++) {
    HRegionServer server = cluster.getRegionServer(i);
    for (HRegion region : server.getRegions(tableName)) {
      region.flush(true);
    }
  }
  finder.setConf(TEST_UTIL.getConfiguration());
  finder.setServices(cluster.getMaster());
  finder.setClusterMetrics(cluster.getMaster().getClusterMetrics());
}