本文整理了 Java 中 org.apache.solr.hadoop.ZooKeeperInspector 类的一些代码示例,展示了 ZooKeeperInspector 类的具体用法。这些代码示例主要来源于 Github/Stackoverflow/Maven 等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ZooKeeperInspector 类的具体详情如下:
包路径:org.apache.solr.hadoop.ZooKeeperInspector
类名称:ZooKeeperInspector
[英]Extracts SolrCloud information from ZooKeeper.
[中]从ZooKeeper中提取SolrCloud信息。
代码示例来源:origin: NGDATA/hbase-indexer
/**
 * Static facade that forwards to the shared {@code DELEGATE} inspector instance.
 *
 * @return the directory the delegate downloaded the named config set into
 */
public static File downloadConfigDir(SolrZkClient zkClient, String configName) throws IOException, InterruptedException, KeeperException {
  File configDir = DELEGATE.downloadConfigDir(zkClient, configName);
  return configDir;
}
}
代码示例来源:origin: NGDATA/hbase-indexer
/**
 * Static facade that forwards to the shared {@code DELEGATE} inspector instance.
 *
 * @return whatever the delegate resolves for the given host and collection
 */
public static DocCollection extractDocCollection(String zkHost, String collection) {
  DocCollection docCollection = DELEGATE.extractDocCollection(zkHost, collection);
  return docCollection;
}
代码示例来源:origin: NGDATA/hbase-indexer
/**
 * Static facade that forwards to the shared {@code DELEGATE} inspector instance.
 *
 * @return the per-shard URL lists produced by the delegate
 */
public static List<List<String>> extractShardUrls(String zkHost, String collection) {
  List<List<String>> shardUrls = DELEGATE.extractShardUrls(zkHost, collection);
  return shardUrls;
}
代码示例来源:origin: cloudera/search
assert options.zkHost != null;
ZooKeeperInspector zki = new ZooKeeperInspector();
SolrZkClient zkClient = zki.getZkClient(options.zkHost);
try {
String configName = zki.readConfigName(zkClient, options.collection);
File tmpSolrHomeDir = zki.downloadConfigDir(zkClient, configName);
SolrOutputFormat.setupSolrHomeCache(tmpSolrHomeDir, job);
options.solrHomeDir = tmpSolrHomeDir;
代码示例来源:origin: cloudera/search
docCollection = new ZooKeeperInspector().extractDocCollection(zkHost, collection);
if (docCollection == null) {
throw new IllegalArgumentException("docCollection must not be null");
throw new IllegalArgumentException("Incompatible shards: + " + shards + " for docCollection: " + docCollection);
List<Slice> slices = new ZooKeeperInspector().getSortedSlices(docCollection.getSlices());
if (slices.size() != shards) {
throw new IllegalStateException("Incompatible sorted shards: + " + shards + " for docCollection: " + docCollection);
代码示例来源:origin: cloudera/search
/**
 * Builds, for every shard (slice) of the given collection, the list of replica core URLs.
 * Slices are returned in the order produced by {@code getSortedSlices}.
 *
 * @param zkHost ZooKeeper connect string handed to {@code extractDocCollection}
 * @param collection SolrCloud collection name
 * @return one inner list of core URLs per shard
 * @throws IllegalArgumentException if any slice has no leader registered yet
 */
public List<List<String>> extractShardUrls(String zkHost, String collection) {
  DocCollection docCollection = extractDocCollection(zkHost, collection);
  List<Slice> sortedSlices = getSortedSlices(docCollection.getSlices());
  List<List<String>> shardUrls = new ArrayList<List<String>>(sortedSlices.size());
  for (Slice shard : sortedSlices) {
    // A missing leader means the shard has not finished registering in ZooKeeper.
    if (shard.getLeader() == null) {
      throw new IllegalArgumentException("Cannot find SolrCloud slice leader. " +
          "It looks like not all of your shards are registered in ZooKeeper yet");
    }
    Collection<Replica> replicas = shard.getReplicas();
    List<String> replicaUrls = new ArrayList<String>(replicas.size());
    for (Replica replica : replicas) {
      replicaUrls.add(new ZkCoreNodeProps(replica).getCoreUrl());
    }
    shardUrls.add(replicaUrls);
  }
  return shardUrls;
}
代码示例来源:origin: com.cloudera.search/search-mr
/**
 * When a ZooKeeper host is configured, resolves the collection's shard URLs from
 * ZooKeeper, stores them and the resulting shard count on {@code opts}, and turns
 * any lookup failure or empty result into an {@link ArgumentParserException}.
 */
private static void verifyZKStructure(Options opts, ArgumentParser parser) throws ArgumentParserException {
  if (opts.zkHost == null) {
    return; // nothing to verify without a ZooKeeper host
  }
  assert opts.collection != null;
  ZooKeeperInspector inspector = new ZooKeeperInspector();
  try {
    opts.shardUrls = inspector.extractShardUrls(opts.zkHost, opts.collection);
  } catch (Exception e) {
    // Surface the failure as a CLI parse error so the user sees it at argument time.
    LOG.debug("Cannot extract SolrCloud shard URLs from ZooKeeper", e);
    throw new ArgumentParserException(e, parser);
  }
  assert opts.shardUrls != null;
  if (opts.shardUrls.isEmpty()) {
    throw new ArgumentParserException("--zk-host requires ZooKeeper " + opts.zkHost
        + " to contain at least one SolrCore for collection: " + opts.collection, parser);
  }
  opts.shards = opts.shardUrls.size();
  LOG.debug("Using SolrCloud shard URLs: {}", opts.shardUrls);
}
代码示例来源:origin: com.cloudera.search/search-mr
/**
 * Returns the cluster-state {@code DocCollection} for {@code collection}, read from the
 * ZooKeeper ensemble at {@code zkHost}. Any collection alias is resolved first via
 * {@code checkForAlias}.
 *
 * <p>Resource handling: the {@code ZkStateReader} is closed by try-with-resources, and the
 * {@code SolrZkClient} created here is closed afterwards in the {@code finally} block.
 *
 * @param zkHost ZooKeeper connect string
 * @param collection collection name or alias; must not be null
 * @throws IllegalArgumentException if {@code collection} is null, if cluster-state
 *     information cannot be read, or if the collection does not exist
 */
public DocCollection extractDocCollection(String zkHost, String collection) {
if (collection == null) {
throw new IllegalArgumentException("collection must not be null");
}
SolrZkClient zkClient = getZkClient(zkHost);
try (ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
try {
// first check for alias
collection = checkForAlias(zkClient, collection);
zkStateReader.createClusterStateWatchersAndUpdate();
} catch (Exception e) {
// Wrap any ZooKeeper/cluster-state failure as an argument problem for the caller.
throw new IllegalArgumentException("Cannot find expected information for SolrCloud in ZooKeeper: " + zkHost, e);
}
try {
return zkStateReader.getClusterState().getCollection(collection);
} catch (SolrException e) {
// getCollection signals a missing collection via SolrException; translate it.
throw new IllegalArgumentException("Cannot find collection '" + collection + "' in ZooKeeper: " + zkHost, e);
}
} finally {
zkClient.close();
}
}
代码示例来源:origin: com.ngdata/hbase-indexer-mr
/**
 * Static facade that forwards to the shared {@code DELEGATE} inspector instance.
 *
 * @return the config name the delegate reads for the given collection
 */
public static String readConfigName(SolrZkClient zkClient, String collection) throws KeeperException, InterruptedException {
  String configName = DELEGATE.readConfigName(zkClient, collection);
  return configName;
}
代码示例来源:origin: com.ngdata/hbase-indexer-mr
/**
 * Static facade that forwards to the shared {@code DELEGATE} inspector instance.
 *
 * @return the {@code SolrZkClient} the delegate creates for {@code zkHost}
 */
public static SolrZkClient getZkClient(String zkHost) {
  SolrZkClient client = DELEGATE.getZkClient(zkHost);
  return client;
}
代码示例来源:origin: NGDATA/hbase-indexer
/**
 * Static facade that forwards to the shared {@code DELEGATE} inspector instance.
 *
 * @return the slices in the order the delegate sorts them
 */
public static List<Slice> getSortedSlices(Collection<Slice> slices) {
  List<Slice> sorted = DELEGATE.getSortedSlices(slices);
  return sorted;
}
代码示例来源:origin: com.cloudera.search/search-mr
+ " </solrcloud></solr>",
"UTF-8");
verifyConfigDir(confDir);
fixupConfigDir(useZkSolrConfig, confDir);
return dir;
代码示例来源:origin: com.cloudera.search/search-mr
collection = checkForAlias(zkClient, collection);
代码示例来源:origin: cloudera/search
/**
 * Download and return the config directory from ZK.
 *
 * <p>The returned directory always contains the config under a {@code conf/} child,
 * either because ZK delivered it that way or because the download is moved into a
 * freshly created {@code conf/} subdirectory. Temp directories are marked
 * delete-on-exit.
 *
 * @param zkClient client connected to the ensemble holding the config
 * @param configName name of the config set to download
 * @return a temp directory whose {@code conf/} child holds the verified config
 */
public File downloadConfigDir(SolrZkClient zkClient, String configName)
  throws IOException, InterruptedException, KeeperException {
  File downloadDir = Files.createTempDir();
  downloadDir.deleteOnExit();
  ZkController.downloadConfigDir(zkClient, configName, downloadDir);
  File confDir = new File(downloadDir, "conf");
  if (confDir.isDirectory()) {
    verifyConfigDir(confDir);
    return downloadDir;
  }
  // No conf/ subdir in the download: create a second temp dir with a "conf" child and
  // move the config into it. This works around CDH-11188 — solrctl neither generates
  // nor accepts layouts such as conf/solrconfig.xml, which proper solr operation
  // requires — and keeps working even if solrctl changes.
  confDir = new File(Files.createTempDir().getAbsolutePath(), "conf");
  confDir.getParentFile().deleteOnExit();
  Files.move(downloadDir, confDir);
  verifyConfigDir(confDir);
  return confDir.getParentFile();
}
代码示例来源:origin: com.cloudera.search/search-mr
assert options.zkHost != null;
ZooKeeperInspector zki = new ZooKeeperInspector();
try (SolrZkClient zkClient = zki.getZkClient(options.zkHost)) {
String configName = zki.readConfigName(zkClient, options.collection);
File tmpSolrHomeDir = zki.downloadConfigDir(zkClient, configName, options.useZkSolrConfig);
SolrOutputFormat.setupSolrHomeCache(tmpSolrHomeDir, job);
options.solrHomeDir = tmpSolrHomeDir;
代码示例来源:origin: com.cloudera.search/search-mr
docCollection = new ZooKeeperInspector().extractDocCollection(zkHost, collection);
if (docCollection == null) {
throw new IllegalArgumentException("docCollection must not be null");
throw new IllegalArgumentException("Incompatible shards: + " + shards + " for docCollection: " + docCollection);
List<Slice> slices = new ZooKeeperInspector().getSortedSlices(docCollection.getSlices());
if (slices.size() != shards) {
throw new IllegalStateException("Incompatible sorted shards: + " + shards + " for docCollection: " + docCollection);
代码示例来源:origin: com.cloudera.search/search-mr
/**
 * Builds, for every shard (slice) of the given collection, the list of replica core URLs,
 * with the shard leader placed at the head of each inner list and followers appended
 * after it in arrival order.
 *
 * @param zkHost ZooKeeper connect string handed to {@code extractDocCollection}
 * @param collection SolrCloud collection name
 * @return one inner list of core URLs per shard, leader first
 * @throws IllegalArgumentException if any slice has no leader registered yet
 */
public List<List<String>> extractShardUrls(String zkHost, String collection) {
  DocCollection docCollection = extractDocCollection(zkHost, collection);
  List<Slice> sortedSlices = getSortedSlices(docCollection.getSlices());
  List<List<String>> shardUrls = new ArrayList<List<String>>(sortedSlices.size());
  for (Slice shard : sortedSlices) {
    // A missing leader means the shard has not finished registering in ZooKeeper.
    if (shard.getLeader() == null) {
      throw new IllegalArgumentException("Cannot find SolrCloud slice leader. " +
          "It looks like not all of your shards are registered in ZooKeeper yet");
    }
    Collection<Replica> replicas = shard.getReplicas();
    List<String> urls = new ArrayList<String>(replicas.size());
    for (Replica replica : replicas) {
      String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
      boolean isLeader = replica.getStr(Slice.LEADER) != null;
      if (isLeader) {
        urls.add(0, coreUrl); // leader goes to the head of the list
      } else {
        urls.add(coreUrl);    // followers are appended at the tail
      }
    }
    shardUrls.add(urls);
  }
  return shardUrls;
}
代码示例来源:origin: cloudera/search
/**
 * Validates the SolrCloud layout reachable through {@code opts.zkHost}: fetches the
 * collection's shard URLs, records them and their count on {@code opts}, and reports
 * failures or an empty result as argument-parsing errors. A null zkHost is a no-op.
 */
private static void verifyZKStructure(Options opts, ArgumentParser parser) throws ArgumentParserException {
  if (opts.zkHost == null) {
    return; // --zk-host not supplied; nothing to check
  }
  assert opts.collection != null;
  try {
    opts.shardUrls = new ZooKeeperInspector().extractShardUrls(opts.zkHost, opts.collection);
  } catch (Exception e) {
    // Report as a CLI parse error rather than letting the raw exception escape.
    LOG.debug("Cannot extract SolrCloud shard URLs from ZooKeeper", e);
    throw new ArgumentParserException(e, parser);
  }
  assert opts.shardUrls != null;
  if (opts.shardUrls.isEmpty()) {
    throw new ArgumentParserException("--zk-host requires ZooKeeper " + opts.zkHost
        + " to contain at least one SolrCore for collection: " + opts.collection, parser);
  }
  opts.shards = opts.shardUrls.size();
  LOG.debug("Using SolrCloud shard URLs: {}", opts.shardUrls);
}
代码示例来源:origin: cloudera/search
/**
 * Returns the cluster-state {@code DocCollection} for {@code collection}, read from the
 * ZooKeeper ensemble at {@code zkHost}. Any collection alias is resolved first via
 * {@code checkForAlias}.
 *
 * <p>Fix: the {@code ZkStateReader} was previously never closed, leaking the watchers it
 * registers via {@code createClusterStateWatchersAndUpdate()}. It is now closed in a
 * nested {@code finally} before the {@code SolrZkClient} created here is closed — the
 * same ordering the try-with-resources variant of this method uses.
 *
 * @param zkHost ZooKeeper connect string
 * @param collection collection name or alias; must not be null
 * @throws IllegalArgumentException if {@code collection} is null, if cluster-state
 *     information cannot be read, or if the collection does not exist
 */
public DocCollection extractDocCollection(String zkHost, String collection) {
  if (collection == null) {
    throw new IllegalArgumentException("collection must not be null");
  }
  SolrZkClient zkClient = getZkClient(zkHost);
  try {
    ZkStateReader zkStateReader = new ZkStateReader(zkClient);
    try {
      try {
        // first check for alias
        collection = checkForAlias(zkClient, collection);
        zkStateReader.createClusterStateWatchersAndUpdate();
      } catch (Exception e) {
        // Wrap any ZooKeeper/cluster-state failure as an argument problem for the caller.
        throw new IllegalArgumentException("Cannot find expected information for SolrCloud in ZooKeeper: " + zkHost, e);
      }
      try {
        return zkStateReader.getClusterState().getCollection(collection);
      } catch (SolrException e) {
        // getCollection signals a missing collection via SolrException; translate it.
        throw new IllegalArgumentException("Cannot find collection '" + collection + "' in ZooKeeper: " + zkHost, e);
      }
    } finally {
      zkStateReader.close(); // release cluster-state watchers before closing the client
    }
  } finally {
    zkClient.close();
  }
}
代码示例来源:origin: NGDATA/hbase-indexer
/**
 * Static facade that forwards to the shared {@code DELEGATE} inspector instance.
 *
 * @return the config name the delegate resolves for the given collection
 */
public static String readConfigName(SolrZkClient zkClient, String collection) throws KeeperException, InterruptedException {
  return DELEGATE
      .readConfigName(zkClient, collection);
}
内容来源于网络,如有侵权,请联系作者删除!