Usage and code examples of the org.apache.hadoop.hbase.HBaseTestingUtility.createRootDir() method


This article collects code examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.createRootDir() and shows how it is used in practice. The examples are taken from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and are intended as a practical reference. Details of HBaseTestingUtility.createRootDir() are as follows:
Package path: org.apache.hadoop.hbase.HBaseTestingUtility
Class name: HBaseTestingUtility
Method name: createRootDir

About HBaseTestingUtility.createRootDir

Same as HBaseTestingUtility#createRootDir(boolean create), except that the create flag is false.
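
Before the individual examples, here is a minimal, self-contained sketch of the typical call pattern. It is illustrative only: the class name and printed output below are not taken from any of the projects listed later. The pattern is to start a mini DFS cluster, call createRootDir(), and then derive the standard sub-directories from the returned path.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;

// Hypothetical scaffold illustrating the usual createRootDir() call pattern.
public class CreateRootDirSketch {
 public static void main(String[] args) throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  // The root dir normally lives on a mini DFS cluster, so start one first.
  testUtil.startMiniDFSCluster(1);
  // Equivalent to createRootDir(false); returns the fully qualified hbase root dir.
  Path rootDir = testUtil.createRootDir();
  // Standard sub-directories are then derived from the returned root,
  // as the @BeforeClass / @Before methods in the examples below do.
  Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  FileSystem fs = testUtil.getDFSCluster().getFileSystem();
  System.out.println("root=" + rootDir + " exists=" + fs.exists(rootDir)
    + " oldLogDir=" + oldLogDir);
  testUtil.shutdownMiniDFSCluster();
 }
}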

Code examples

Code example source: apache/hbase

/**
 * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
 * except that <code>create</code> flag is false.
 * @return Fully qualified path to hbase root dir
 * @throws IOException
 */
public Path createRootDir() throws IOException {
 return createRootDir(false);
}

Code example source: apache/hbase

@BeforeClass
public static void setUpBeforeClass() throws Exception {
 TEST_UTIL.startMiniDFSCluster(1);
 FS = TEST_UTIL.getDFSCluster().getFileSystem();
 Path rootDir = TEST_UTIL.createRootDir();
 oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
 if (FS.exists(oldLogDir)) FS.delete(oldLogDir, true);
 logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
 if (FS.exists(logDir)) FS.delete(logDir, true);
}

Code example source: apache/hbase

@Before
public void setUp() throws Exception {
 FileStatus[] entries = FS.listStatus(new Path("/"));
 for (FileStatus dir : entries) {
  FS.delete(dir.getPath(), true);
 }
 final Path hbaseDir = TEST_UTIL.createRootDir();
 final Path hbaseWALDir = TEST_UTIL.createWALRootDir();
 DIR = new Path(hbaseWALDir, currentTest.getMethodName());
 assertNotEquals(hbaseDir, hbaseWALDir);
}

Code example source: apache/hbase

@BeforeClass
public static void setUpBeforeClass() throws Exception {
 conf = TEST_UTIL.getConfiguration();
 conf.setInt("hbase.regionserver.maxlogs", 5);
 rootDir = TEST_UTIL.createRootDir();
 walRootDir = TEST_UTIL.createWALRootDir();
 fs = FSUtils.getRootDirFileSystem(conf);
 logFs = FSUtils.getWALFileSystem(conf);
}

Code example source: apache/hbase

@Before
public void setUp() throws Exception {
 LOG.info("Cleaning up cluster for new test.");
 fs = TEST_UTIL.getDFSCluster().getFileSystem();
 HBASEDIR = TEST_UTIL.createRootDir();
 HBASELOGDIR = TEST_UTIL.createWALRootDir();
 OLDLOGDIR = new Path(HBASELOGDIR, HConstants.HREGION_OLDLOGDIR_NAME);
 CORRUPTDIR = new Path(HBASELOGDIR, HConstants.CORRUPT_DIR_NAME);
 TABLEDIR = FSUtils.getTableDir(HBASEDIR, TABLE_NAME);
 TMPDIRNAME = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
  HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
 REGIONS.clear();
 Collections.addAll(REGIONS, "bbb", "ccc");
 InstrumentedLogWriter.activateFailure = false;
 wals = new WALFactory(conf, name.getMethodName());
 WALDIR = new Path(HBASELOGDIR,
   AbstractFSWALProvider.getWALDirectoryName(ServerName.valueOf(name.getMethodName(),
     16010, System.currentTimeMillis()).toString()));
 //fs.mkdirs(WALDIR);
}

Code example source: apache/hbase

@BeforeClass
public static void setUpBeforeClass() throws Exception {
 // Make block sizes small.
 conf = TEST_UTIL.getConfiguration();
 conf.setInt("dfs.blocksize", 1024 * 1024);
 conf.setInt("dfs.replication", 1);
 TEST_UTIL.startMiniDFSCluster(1);
 conf = TEST_UTIL.getConfiguration();
 fs = TEST_UTIL.getDFSCluster().getFileSystem();
 hbaseDir = TEST_UTIL.createRootDir();
 walRootDir = TEST_UTIL.createWALRootDir();
 walFs = FSUtils.getWALFileSystem(conf);
 logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
}

Code example source: apache/hbase

@BeforeClass
public static void setUpBeforeClass() throws Exception {
 CommonFSUtils.setWALRootDir(TEST_UTIL.getConfiguration(), new Path("file:///tmp/wal"));
 // Make block sizes small.
 TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
 // needed for testAppendClose()
 // quicker heartbeat interval for faster DN death notification
 TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
 TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
 TEST_UTIL.getConfiguration().setInt("dfs.client.socket-timeout", 5000);
 // faster failover with cluster.shutdown();fs.close() idiom
 TEST_UTIL.getConfiguration()
   .setInt("hbase.ipc.client.connect.max.retries", 1);
 TEST_UTIL.getConfiguration().setInt(
   "dfs.client.block.recovery.retries", 1);
 TEST_UTIL.getConfiguration().setInt(
  "hbase.ipc.client.connection.maxidletime", 500);
 TEST_UTIL.getConfiguration().setInt("hbase.lease.recovery.timeout", 10000);
 TEST_UTIL.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
 TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
   SampleRegionWALCoprocessor.class.getName());
 TEST_UTIL.startMiniDFSCluster(3);
 conf = TEST_UTIL.getConfiguration();
 cluster = TEST_UTIL.getDFSCluster();
 hbaseDir = TEST_UTIL.createRootDir();
 hbaseWALDir = TEST_UTIL.createWALRootDir();
}

Code example source: apache/hbase

@Before
public void setUp() throws Exception {
 fs = TEST_UTIL.getDFSCluster().getFileSystem();
 dir = new Path(TEST_UTIL.createRootDir(), currentTest.getMethodName());
 wals = new WALFactory(TEST_UTIL.getConfiguration(), currentTest.getMethodName());
}

Code example source: apache/hbase

@BeforeClass
public static void setUpBeforeClass() throws Exception {
 // Make block sizes small.
 TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
 // quicker heartbeat interval for faster DN death notification
 TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
 TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
 TEST_UTIL.getConfiguration().setInt("dfs.client.socket-timeout", 5000);
 // faster failover with cluster.shutdown();fs.close() idiom
 TEST_UTIL.getConfiguration()
   .setInt("hbase.ipc.client.connect.max.retries", 1);
 TEST_UTIL.getConfiguration().setInt(
   "dfs.client.block.recovery.retries", 1);
 TEST_UTIL.getConfiguration().setInt(
  "hbase.ipc.client.connection.maxidletime", 500);
 TEST_UTIL.startMiniDFSCluster(3);
 // Set up a working space for our tests.
 TEST_UTIL.createRootDir();
 conf = TEST_UTIL.getConfiguration();
 fs = TEST_UTIL.getDFSCluster().getFileSystem();
}

Code example source: apache/hbase

@BeforeClass
public static void setUpBeforeClass() throws Exception {
 conf = TEST_UTIL.getConfiguration();
 TEST_UTIL.startMiniDFSCluster(1);
 rootDir = TEST_UTIL.createRootDir();
 walRootDir = TEST_UTIL.createWALRootDir();
 fs = FSUtils.getRootDirFileSystem(conf);
 walFs = FSUtils.getWALFileSystem(conf);
}

Code example source: apache/hbase

@BeforeClass
public static void beforeClass() throws Exception {
 conf = TEST_UTIL.getConfiguration();
 rootDir = TEST_UTIL.createRootDir();
 walRootDir = TEST_UTIL.createWALRootDir();
 fs = FSUtils.getRootDirFileSystem(conf);
 logFs = FSUtils.getWALFileSystem(conf);
 cluster = TEST_UTIL.startMiniCluster();
}

Code example source: apache/hbase

@Before
public void setUp() throws Exception {
 testUtil = new HBaseTestingUtility();
 testUtil.startMiniDFSCluster(1);
 testUtil.startMiniZKCluster(1);
 testUtil.createRootDir();
 cluster = new LocalHBaseCluster(testUtil.getConfiguration(), 0, 0);
}

Code example source: apache/hbase

@Before
public void setUp() throws Exception {
 testUtil = new HBaseTestingUtility();
 testUtil.startMiniDFSCluster(1);
 testUtil.startMiniZKCluster(1);
 testUtil.createRootDir(); //manually setup hbase dir to point to minidfscluster
 cluster = new LocalHBaseCluster(testUtil.getConfiguration(), 0, 0);
}

Code example source: apache/hbase

@Test
public void testRewritingClusterIdToPB() throws Exception {
 TEST_UTIL.startMiniZKCluster();
 TEST_UTIL.startMiniDFSCluster(1);
 TEST_UTIL.createRootDir();
 Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
 FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
 Path filePath = new Path(rootDir, HConstants.CLUSTER_ID_FILE_NAME);
 FSDataOutputStream s = null;
 try {
  s = fs.create(filePath);
  s.writeUTF(TEST_UTIL.getRandomUUID().toString());
 } finally {
  if (s != null) {
   s.close();
  }
 }
 TEST_UTIL.startMiniHBaseCluster();
 HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
 int expected = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration())? 2: 1;
 assertEquals(expected, master.getServerManager().getOnlineServersList().size());
}

Code example source: apache/hbase

util.startMiniDFSCluster(3);
util.startMiniZKCluster();
util.createRootDir();
final LocalHBaseCluster cluster =
  new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class,

Code example source: apache/hbase

throws IOException, InterruptedException {
createRootDir(option.isCreateRootDir());
if (option.isCreateWALDir()) {
 createWALRootDir();
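
The truncated fragment above comes from the mini-cluster startup path, where a StartMiniClusterOption object decides whether the root dir and WAL dir get created. Below is a hedged sketch of driving that path from a test; it assumes the option's builder exposes setters matching the isCreateRootDir()/isCreateWALDir() getters used above, so treat the builder calls as illustrative rather than authoritative.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Hedged sketch: builder setter names are assumed to mirror the option getters above.
public class StartMiniClusterSketch {
 public static void main(String[] args) throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  StartMiniClusterOption option = StartMiniClusterOption.builder()
    .numMasters(1)
    .numRegionServers(1)
    .createRootDir(true)  // assumed setter; read back via option.isCreateRootDir()
    .createWALDir(true)   // assumed setter; read back via option.isCreateWALDir()
    .build();
  // Startup then calls createRootDir(option.isCreateRootDir()) as in the fragment above.
  testUtil.startMiniCluster(option);
  testUtil.shutdownMiniCluster();
 }
}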

Code example source: apache/hbase

TEST_UTIL.startMiniDFSCluster(3);
TEST_UTIL.startMiniZKCluster();
TEST_UTIL.createRootDir();
final LocalHBaseCluster cluster =
  new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class,

Code example source: org.apache.hbase/hbase-server

/**
 * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
 * except that <code>create</code> flag is false.
 * @return Fully qualified path to hbase root dir
 * @throws IOException
 */
public Path createRootDir() throws IOException {
 return createRootDir(false);
}

Code example source: org.apache.hbase/hbase-server

@Before
public void setUp() throws Exception {
 FileStatus[] entries = FS.listStatus(new Path("/"));
 for (FileStatus dir : entries) {
  FS.delete(dir.getPath(), true);
 }
 final Path hbaseDir = TEST_UTIL.createRootDir();
 final Path hbaseWALDir = TEST_UTIL.createWALRootDir();
 DIR = new Path(hbaseWALDir, currentTest.getMethodName());
 assertNotEquals(hbaseDir, hbaseWALDir);
}

Code example source: org.apache.hbase/hbase-server

@Before
public void setUp() throws Exception {
 fs = TEST_UTIL.getDFSCluster().getFileSystem();
 dir = new Path(TEST_UTIL.createRootDir(), currentTest.getMethodName());
 wals = new WALFactory(TEST_UTIL.getConfiguration(), currentTest.getMethodName());
}
