This article collects code examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.getDataTestDirOnTestFS(), showing how HBaseTestingUtility.getDataTestDirOnTestFS() is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the HBaseTestingUtility.getDataTestDirOnTestFS() method are as follows:
Package: org.apache.hadoop.hbase
Class: HBaseTestingUtility
Method: getDataTestDirOnTestFS
Description: Returns a Path in the test filesystem, obtained from #getTestFileSystem(), to write temporary test data. Call this method after setting up the mini dfs cluster if the test relies on it.
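Before the collected examples, here is a minimal, self-contained sketch of the typical usage pattern, assuming JUnit 4 and a mini DFS cluster; the test class name and the subdirectory name are hypothetical, not taken from the examples below:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

// Hypothetical test class illustrating the documented contract:
// start the mini dfs cluster first, then ask for a scratch directory.
public class ScratchDirExampleTest {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    UTIL.startMiniDFSCluster(1); // the method should be called after the mini dfs cluster is up
  }

  @Test
  public void writesTemporaryData() throws Exception {
    // Unique path on the test filesystem, under the base test dir
    Path scratch = UTIL.getDataTestDirOnTestFS("scratch-subdir");
    FileSystem fs = UTIL.getTestFileSystem();
    fs.mkdirs(scratch);
    // ... write temporary test data under 'scratch' ...
    UTIL.cleanupDataTestDirOnTestFS("scratch-subdir"); // recursively deletes the subdir
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniDFSCluster();
  }
}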
Code example source: apache/hbase
/**
 * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
 * to write temporary test data. Call this method after setting up the mini dfs cluster
 * if the test relies on it.
 * @return a unique path in the test filesystem
 * @param subdirName name of the subdir to create under the base test dir
 */
public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
  return new Path(getDataTestDirOnTestFS(), subdirName);
}
Code example source: apache/hbase
private void setHBaseFsTmpDir() throws IOException {
  String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
  if (hbaseFsTmpDirInString == null) {
    this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
    LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
  } else {
    LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
  }
}
Code example source: apache/hbase
/**
 * Cleans a subdirectory under the test data directory on the test filesystem.
 * @return True if we removed child
 * @throws IOException
 */
public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
  Path cpath = getDataTestDirOnTestFS(subdirName);
  return getTestFileSystem().delete(cpath, true);
}
Code example source: apache/hbase
@Before public void setUp() throws Exception {
  rootRegionDir = UTILITY.getDataTestDirOnTestFS("TestMajorCompactionRequest");
  regionStoreDir = new Path(rootRegionDir, FAMILY);
}
Code example source: apache/hbase
private Path getPath(int index) throws IOException {
  String methodName = name.getMethodName().replaceAll("[^A-Za-z0-9_-]", "_");
  return new Path(UTIL.getDataTestDirOnTestFS(), methodName + "-" + index);
}
Code example source: apache/hbase
@Before
public void setup() throws IOException {
  root = TEST_UTIL.getDataTestDirOnTestFS();
}
Code example source: apache/hbase
@Test
public void testSkipEmptyColumns() throws Exception {
  Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles");
  args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
  args.put(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,HBASE_TS_KEY,FAM:A,FAM:B");
  args.put(ImportTsv.SEPARATOR_CONF_KEY, ",");
  args.put(ImportTsv.SKIP_EMPTY_COLUMNS, "true");
  // 2 rows of data as input. Both rows are valid and only 3 columns are non-empty among 4
  String data = "KEY,1234,VALUE1,VALUE2\nKEY,1235,,VALUE2\n";
  doMROnTableTest(util, tn, FAMILY, data, args, 1, 3);
  util.deleteTable(tn);
}
Code example source: apache/hbase
@Test
public void testBulkOutputWithTsvImporterTextMapper() throws Exception {
  Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles");
  args.put(ImportTsv.MAPPER_CONF_KEY, "org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper");
  args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
  String data = "KEY\u001bVALUE4\u001bVALUE8\n";
  doMROnTableTest(data, 4);
  util.deleteTable(tn);
}
Code example source: apache/hbase
@Test
public void testDryModeWithBulkModeAndTableDoesNotExistsCreateTableSetToYes() throws Exception {
  // Prepare the arguments required for the test.
  Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles");
  args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString());
  args.put(ImportTsv.DRY_RUN_CONF_KEY, "true");
  args.put(ImportTsv.CREATE_TABLE_CONF_KEY, "yes");
  doMROnTableTest(null, 1);
  // Verify temporary table was deleted.
  exception.expect(TableNotFoundException.class);
  util.deleteTable(tn);
}
Code example source: apache/hbase
@Test
public void testBulkOutputWithoutAnExistingTable() throws Exception {
  // Prepare the arguments required for the test.
  Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles");
  args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString());
  doMROnTableTest(null, 3);
  util.deleteTable(tn);
}
Code example source: apache/hbase
@BeforeClass
public static void before() throws Exception {
  HTU.startMiniCluster();
  HTU.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY, 10 * 1024 * 1024L);
  rootRegionDir = HTU.getDataTestDirOnTestFS("TestRowTooBig");
}
Code example source: apache/hbase
@Test
public void testBulkOutputWithAnExistingTableNoStrictTrue() throws Exception {
  util.createTable(tn, FAMILY);
  // Prepare the arguments required for the test.
  Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles");
  args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString());
  args.put(ImportTsv.NO_STRICT_COL_FAMILY, "true");
  doMROnTableTest(null, 3);
  util.deleteTable(tn);
}
Code example source: apache/hbase
@Test public void testDryModeWithBulkOutputAndTableExists() throws Exception {
  util.createTable(tn, FAMILY);
  // Prepare the arguments required for the test.
  Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles");
  args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString());
  args.put(ImportTsv.DRY_RUN_CONF_KEY, "true");
  doMROnTableTest(null, 1);
  // Dry mode should not delete an existing table. If it's not present,
  // this will throw TableNotFoundException.
  util.deleteTable(tn);
}
Code example source: apache/hbase
private Path buildBulkFiles(TableName table, int value) throws Exception {
  Path dir = util.getDataTestDirOnTestFS(table.getNameAsString());
  Path bulk1 = new Path(dir, table.getNameAsString() + value);
  FileSystem fs = util.getTestFileSystem();
  buildHFiles(fs, bulk1, value);
  return bulk1;
}
Code example source: apache/hbase
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  UTIL.getConfiguration().setInt("replication.source.nb.capacity", 10);
  UTIL.getConfiguration().setLong("replication.sleep.before.failover", 1000);
  UTIL.getConfiguration().setLong("hbase.serial.replication.waiting.ms", 100);
  UTIL.startMiniCluster(3);
  // disable balancer
  UTIL.getAdmin().balancerSwitch(false, true);
  LOG_DIR = UTIL.getDataTestDirOnTestFS("replicated");
  FS = UTIL.getTestFileSystem();
  FS.mkdirs(LOG_DIR);
}
Code example source: apache/hbase
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  EVENT_LOOP_GROUP = new NioEventLoopGroup();
  CHANNEL_CLASS = NioSocketChannel.class;
  UTIL.startMiniDFSCluster(3);
  UTIL.getTestFileSystem().mkdirs(UTIL.getDataTestDirOnTestFS());
  WALS = new WALFactory(UTIL.getConfiguration(), TestCombinedAsyncWriter.class.getSimpleName());
}
Code example source: apache/hbase
protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
    String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families)
    throws IOException {
  Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log");
  ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
  HRegionInfo hri = new HRegionInfo(tableName, startKey, stopKey);
  final WAL wal = HBaseTestingUtility.createWal(conf, logDir, hri);
  return initHRegion(tableName, startKey, stopKey, isReadOnly, Durability.SYNC_WAL, wal, families);
}
Code example source: apache/hbase
@Before
public void setup() throws Exception {
  testUtil = new HBaseTestingUtility();
  testUtil.startMiniDFSCluster(1);
  testDir = testUtil.getDataTestDirOnTestFS();
  FSUtils.setRootDir(testUtil.getConfiguration(), testDir);
}
Code example source: apache/hbase
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  CONF = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniDFSCluster(1);
  CLUSTER = TEST_UTIL.getDFSCluster();
  FS = CLUSTER.getFileSystem();
  DIR = TEST_UTIL.getDataTestDirOnTestFS("TestDurability");
  FSUtils.setRootDir(CONF, DIR);
}
Code example source: apache/hbase
@Before
public void setup() throws ReplicationException, IOException {
  root = TEST_UTIL.getDataTestDirOnTestFS();
  rp.getPeerStorage().addPeer(peerId,
    ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL.getClusterKey()).build(), true,
    SyncReplicationState.NONE);
  rq.addPeerToHFileRefs(peerId);
}