本文整理了Java中org.apache.hadoop.hbase.HBaseTestingUtility.getTestFileSystem()
方法的一些代码示例,展示了HBaseTestingUtility.getTestFileSystem()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。HBaseTestingUtility.getTestFileSystem()
方法的具体详情如下:
包路径:org.apache.hadoop.hbase.HBaseTestingUtility
类名称:HBaseTestingUtility
方法名:getTestFileSystem
暂无
代码示例来源:origin: apache/hbase
/**
 * Caches the test filesystem and configuration from TEST_UTIL and stores the
 * given load directory as a fully-qualified path.
 *
 * @param loadPath directory containing the HFiles to bulk load; may be relative
 * @throws IOException if the test filesystem cannot be obtained
 */
public BulkLoadHelper(Path loadPath) throws IOException {
  fs = TEST_UTIL.getTestFileSystem();
  conf = TEST_UTIL.getConfiguration();
  // Qualify against the test FS without reassigning the parameter.
  // Path.makeQualified(FileSystem) is deprecated; use the (URI, Path) overload.
  this.loadPath = loadPath.makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
代码示例来源:origin: apache/hbase
/** Returns whether the MOB family directory for {@code tn}/{@code familyName} exists on the test FS. */
private boolean mobColumnFamilyDirExist(TableName tn, String familyName) throws IOException {
  Path familyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName);
  return TEST_UTIL.getTestFileSystem().exists(familyPath);
}
代码示例来源:origin: apache/hbase
/**
 * Counts the MOB files under the family directory of the given table.
 *
 * @return the number of entries in the MOB family dir, or 0 if it does not exist yet
 */
private int countMobFiles(TableName tn, String familyName) throws IOException {
  FileSystem fileSystem = TEST_UTIL.getTestFileSystem();
  Path familyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, familyName);
  return fileSystem.exists(familyPath) ? fileSystem.listStatus(familyPath).length : 0;
}
代码示例来源:origin: apache/hbase
/** Returns whether the MOB table directory for {@code tn} exists under the MOB home. */
private boolean mobTableDirExist(TableName tn) throws IOException {
  Path mobHome = MobUtils.getMobHome(TEST_UTIL.getConfiguration());
  return TEST_UTIL.getTestFileSystem().exists(FSUtils.getTableDir(mobHome, tn));
}
代码示例来源:origin: apache/hbase
/**
 * Opens a WAL reader over the primary WAL's current file.
 * <p>The redundant {@code throws FileNotFoundException} was dropped: it is a
 * subclass of {@link IOException}, so the caller-visible contract is unchanged.
 *
 * @return a reader positioned at the start of the primary WAL's current file
 * @throws IOException if the WAL file is missing or cannot be opened
 */
WAL.Reader createWALReaderForPrimary() throws IOException {
  return WALFactory.createReader(TEST_UTIL.getTestFileSystem(),
      AbstractFSWALProvider.getCurrentFileName(walPrimary),
      TEST_UTIL.getConfiguration());
}
代码示例来源:origin: apache/hbase
/** Creates a fresh testing utility per test and caches its configuration and test filesystem. */
@Before
public void setUp() throws IOException {
  htu = new HBaseTestingUtility();
  conf = htu.getConfiguration();
  fs = htu.getTestFileSystem();
}
代码示例来源:origin: apache/hbase
/**
 * Opens the given region read-only, straight from the filesystem (no region server),
 * using the live table's descriptor.
 */
private HRegion openSnapshotRegion(RegionInfo firstRegion, Path tableDir) throws IOException {
  return HRegion.openReadOnlyFileSystemHRegion(TEST_UTIL.getConfiguration(),
      TEST_UTIL.getTestFileSystem(), tableDir, firstRegion, table.getDescriptor());
}
}
代码示例来源:origin: apache/hbase
/** One-time fixture: caches the shared configuration, cache config, and test filesystem. */
@BeforeClass
public static void setUp() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  fs = TEST_UTIL.getTestFileSystem();
  // CacheConfig depends on conf, so it is built after conf is assigned.
  cacheConf = new CacheConfig(conf);
}
代码示例来源:origin: apache/hbase
/** Returns whether {@code fileName} exists in the MOB store archive for the given family. */
private boolean mobArchiveExist(TableName tn, String familyName, String fileName)
    throws IOException {
  Path archivePath = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), tn,
      MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName);
  return TEST_UTIL.getTestFileSystem().exists(new Path(archivePath, fileName));
}
代码示例来源:origin: apache/hbase
/**
 * Counts files in the MOB store archive for the given table/family.
 *
 * @return number of archived entries, or 0 if the archive dir does not exist yet
 */
private int countArchiveMobFiles(TableName tn, String familyName)
    throws IOException {
  FileSystem fileSystem = TEST_UTIL.getTestFileSystem();
  Path archivePath = HFileArchiveUtil.getStoreArchivePath(TEST_UTIL.getConfiguration(), tn,
      MobUtils.getMobRegionInfo(tn).getEncodedName(), familyName);
  return fileSystem.exists(archivePath) ? fileSystem.listStatus(archivePath).length : 0;
}
代码示例来源:origin: apache/hbase
// One-time cluster bootstrap: tighten replication timings so the test completes
// quickly, start a 3-node mini cluster, and create a shared WAL directory on the
// test filesystem. Statement order matters: the cluster must be running before
// getAdmin() is called, and the FS before mkdirs().
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// Small source capacity and short failover/serial-wait delays speed up replication tests.
UTIL.getConfiguration().setInt("replication.source.nb.capacity", 10);
UTIL.getConfiguration().setLong("replication.sleep.before.failover", 1000);
UTIL.getConfiguration().setLong("hbase.serial.replication.waiting.ms", 100);
UTIL.startMiniCluster(3);
// disable balancer
UTIL.getAdmin().balancerSwitch(false, true);
// Shared directory for replicated WAL entries, created on the cluster's test FS.
LOG_DIR = UTIL.getDataTestDirOnTestFS("replicated");
FS = UTIL.getTestFileSystem();
FS.mkdirs(LOG_DIR);
}
代码示例来源:origin: apache/hbase
/** Per-test fixture: fresh testing utility, cached conf/FS, table named after the test method. */
@Before
public void setup() throws IOException {
  TEST_UTIL = new HBaseTestingUtility();
  CONF = TEST_UTIL.getConfiguration();
  FILESYSTEM = TEST_UTIL.getTestFileSystem();
  method = name.getMethodName();
  tableName = TableName.valueOf(method);
}
代码示例来源:origin: apache/hbase
/** Two HFileLinks built from identical paths must produce equal StoreFileInfo with equal hashes. */
@Test
public void testEqualsWithLink() throws IOException {
  Path originDir = new Path("/origin");
  Path tmpDir = TEST_UTIL.getDataTestDir();
  Path mobDir = new Path("/mob");
  Path archiveDir = new Path("/archive");
  HFileLink linkA = new HFileLink(new Path(originDir, "f1"), new Path(tmpDir, "f1"),
      new Path(mobDir, "f1"), new Path(archiveDir, "f1"));
  HFileLink linkB = new HFileLink(new Path(originDir, "f1"), new Path(tmpDir, "f1"),
      new Path(mobDir, "f1"), new Path(archiveDir, "f1"));
  StoreFileInfo infoA = new StoreFileInfo(TEST_UTIL.getConfiguration(),
      TEST_UTIL.getTestFileSystem(), null, linkA);
  StoreFileInfo infoB = new StoreFileInfo(TEST_UTIL.getConfiguration(),
      TEST_UTIL.getTestFileSystem(), null, linkB);
  // equals and hashCode must agree for links over the same underlying paths.
  assertEquals(infoA, infoB);
  assertEquals(infoA.hashCode(), infoB.hashCode());
}
}
代码示例来源:origin: apache/hbase
/**
 * Bulk-loads the given HFile ranges into the table, verifies the bulk-load staging
 * directory was cleaned up (no leftover "DONOTERASE" entry), and drops the table.
 *
 * @param testName        label used for the generated HFiles
 * @param htd             descriptor of the target table
 * @param preCreateTable  whether the table exists before the load
 * @param tableSplitKeys  split keys for table creation, or null
 * @param hfileRanges     [start,end) row ranges, one HFile per range
 * @param useMap          whether to drive the load via the map-based API
 * @param copyFiles       whether the loader copies rather than moves the HFiles
 * @param depth           nesting depth of the HFile directory layout
 */
private void runTest(String testName, TableDescriptor htd,
    boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges, boolean useMap,
    boolean copyFiles, int depth) throws Exception {
  loadHFiles(testName, htd, util, FAMILY, QUALIFIER, preCreateTable, tableSplitKeys, hfileRanges,
      useMap, true, copyFiles, 0, 1000, depth);
  final TableName tableName = htd.getTableName();
  // verify staging folder has been cleaned up
  Path stagingBasePath =
      new Path(FSUtils.getRootDir(util.getConfiguration()), HConstants.BULKLOAD_STAGING_DIR_NAME);
  FileSystem fs = util.getTestFileSystem();
  if (fs.exists(stagingBasePath)) {
    for (FileStatus file : fs.listStatus(stagingBasePath)) {
      // BUGFIX: the original used `!= "DONOTERASE"`, which compares String
      // references and is effectively always true, making the assertion vacuous.
      // Compare contents with equals() so a leftover staging entry actually fails.
      assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
          !"DONOTERASE".equals(file.getPath().getName()));
    }
  }
  util.deleteTable(tableName);
}
代码示例来源:origin: apache/hbase
/** Per-test fixture: root/archive dirs under the data test dir, with conf rooted there. */
@Before
public void setup() throws Exception {
  rootDir = TEST_UTIL.getDataTestDir("testRestore");
  archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  conf = TEST_UTIL.getConfiguration();
  fs = TEST_UTIL.getTestFileSystem();
  setupConf(conf);
  // Point HBase's root dir at our per-test directory.
  FSUtils.setRootDir(conf, rootDir);
}
代码示例来源:origin: apache/hbase
// One-time fixture: NIO event loop for the async WAL writer, a 3-node mini DFS
// cluster, and a WALFactory over it. Order matters: the DFS cluster must be up
// before the test filesystem is used to create the data directory.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
EVENT_LOOP_GROUP = new NioEventLoopGroup();
CHANNEL_CLASS = NioSocketChannel.class;
UTIL.startMiniDFSCluster(3);
// Ensure the per-test data directory exists on the freshly started DFS.
UTIL.getTestFileSystem().mkdirs(UTIL.getDataTestDirOnTestFS());
WALS = new WALFactory(UTIL.getConfiguration(), TestCombinedAsyncWriter.class.getSimpleName());
}
代码示例来源:origin: apache/hbase
/**
 * Writes one HFile per requested row range into a per-family staging directory on
 * the source cluster's test filesystem, then bulk-loads them into {@code source}
 * so the data can replicate to the peer.
 */
private void loadAndValidateHFileReplication(String testName, byte[] row, byte[] fam,
    Table source, byte[][][] hfileRanges, int numOfRows) throws Exception {
  FileSystem fs = utility1.getTestFileSystem();
  Path dir = utility1.getDataTestDirOnTestFS(testName).makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(fam));
  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    Path hfilePath = new Path(familyDir, "hfile_" + hfileIdx++);
    HFileTestUtil.createHFile(utility1.getConfiguration(), fs, hfilePath, fam, row,
        range[0], range[1], numOfRows);
  }
  final TableName tableName = source.getName();
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(utility1.getConfiguration());
  loader.run(new String[] { dir.toString(), tableName.toString() });
}
代码示例来源:origin: apache/hbase
// Test double for HStoreFile with hand-set length/age/ref/sequence-id metadata.
// Backed by a real path on the test filesystem; the super call uses BloomType.NONE
// and primaryReplica=true (last arg — assumption based on HStoreFile's ctor; verify).
MockHStoreFile(HBaseTestingUtility testUtil, Path testPath,
long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException {
super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true);
this.length = length;
this.isRef = isRef;
this.ageInDisk = ageInDisk;
this.sequenceid = sequenceid;
this.isMajor = false;
// Pretend all blocks live on the local RS host with weight 1, so locality
// calculations have deterministic input.
hdfsBlocksDistribution = new HDFSBlocksDistribution();
hdfsBlocksDistribution.addHostsAndBlockWeight(
new String[] { RSRpcServices.getHostname(testUtil.getConfiguration(), false) }, 1);
modificationTime = EnvironmentEdgeManager.currentTime();
}
代码示例来源:origin: apache/hbase
/**
 * Creates a 1000-row HFile spanning rows "aaa".."zzz", splits it at "ggg" into a
 * bottom and a top half, and verifies no rows were lost or duplicated.
 */
@Test
public void testSplitStoreFile() throws IOException {
  Path workDir = util.getDataTestDirOnTestFS("testSplitHFile");
  FileSystem fs = util.getTestFileSystem();
  Path sourceHFile = new Path(workDir, "testhfile");
  ColumnFamilyDescriptor familyDesc = ColumnFamilyDescriptorBuilder.of(FAMILY);
  HFileTestUtil.createHFile(util.getConfiguration(), fs, sourceHFile, FAMILY, QUALIFIER,
      Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);
  Path bottomOut = new Path(workDir, "bottom.out");
  Path topOut = new Path(workDir, "top.out");
  LoadIncrementalHFiles.splitStoreFile(util.getConfiguration(), sourceHFile, familyDesc,
      Bytes.toBytes("ggg"), bottomOut, topOut);
  // The two halves together must contain exactly the original row count.
  assertEquals(1000, verifyHFile(bottomOut) + verifyHFile(topOut));
}
代码示例来源:origin: apache/hbase
/** Writes a pre-built HFile (rows "50".."60") directly into every region's family dir on disk. */
protected void addHFilesToRegions() throws IOException {
  MasterFileSystem mfs = HTU.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), TABLE_NAME);
  for (Region region : cluster.getRegions(TABLE_NAME)) {
    Path familyDir = new Path(
        new Path(tableDir, region.getRegionInfo().getEncodedName()), Bytes.toString(FAMILY));
    HFileTestUtil.createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(),
        new Path(familyDir, HFILE_NAME), FAMILY, QUALIFIER,
        Bytes.toBytes("50"), Bytes.toBytes("60"), NUM_ROWS);
  }
}
}
内容来源于网络,如有侵权,请联系作者删除!