This article collects code examples of the Java class com.liveramp.hank.config.yaml.YamlCoordinatorConfigurator and demonstrates how the class is used. The examples are taken from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the YamlCoordinatorConfigurator class:
Package: com.liveramp.hank.config.yaml
Class name: YamlCoordinatorConfigurator
Class description: none available
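Before the individual examples, a minimal usage sketch: the configurator is constructed from the path of a YAML configuration file and is then asked to build a Coordinator. The configuration path below is hypothetical, and the import paths for CoordinatorConfigurator, InvalidConfigurationException, and Coordinator are inferred from the project's package layout:

import java.io.IOException;

import com.liveramp.hank.config.CoordinatorConfigurator;
import com.liveramp.hank.config.InvalidConfigurationException;
import com.liveramp.hank.config.yaml.YamlCoordinatorConfigurator;
import com.liveramp.hank.coordinator.Coordinator;

public class YamlCoordinatorConfiguratorExample {
  public static void main(String[] args) throws IOException, InvalidConfigurationException {
    // Hypothetical path; the file must contain a "coordinator" section
    // naming a factory class and an options sub-section (see the examples below).
    CoordinatorConfigurator configurator = new YamlCoordinatorConfigurator("/path/to/coordinator.yml");
    Coordinator coordinator = configurator.createCoordinator();
  }
}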
Code example source: LiveRamp/hank

public static void main(String[] args) throws IOException, InvalidConfigurationException, InterruptedException, KeeperException {
  CommandLineChecker.check(args, new String[]{"configuration"},
      MigrationHelper.class);
  String configurationPath = args[0];
  Coordinator coordinator = new YamlCoordinatorConfigurator(configurationPath).createCoordinator();
}
Code example source: LiveRamp/hank

@Override
public Coordinator createCoordinator() {
  try {
    validate();
  } catch (InvalidConfigurationException e) {
    throw new RuntimeException("Configuration is invalid!", e);
  }
  String factoryClassName = getString(COORDINATOR_SECTION_KEY, COORDINATOR__FACTORY_KEY);
  Class<CoordinatorFactory> factoryClass;
  try {
    factoryClass = (Class<CoordinatorFactory>) Class.forName(factoryClassName);
  } catch (ClassNotFoundException e) {
    throw new RuntimeException("Could not load coordinator factory class " + factoryClassName + "!", e);
  }
  CoordinatorFactory factory;
  try {
    factory = factoryClass.newInstance();
  } catch (Exception e) {
    throw new RuntimeException("Could not get an instance of " + factoryClass.getName() + "!", e);
  }
  return factory.getCoordinator(getSection(COORDINATOR_SECTION_KEY, COORDINATOR__OPTIONS_KEY));
}
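The factory class named in the configuration is loaded reflectively and instantiated through its no-argument constructor, so any implementation must provide one. Below is a hypothetical sketch; it assumes, based on the call above, that CoordinatorFactory.getCoordinator receives the options sub-section as a java.util.Map<String, Object> (the actual parameter type is whatever getSection(...) returns), and MyCoordinator stands in for a real Coordinator implementation:

// Hypothetical factory, referenced from the YAML configuration.
public class MyCoordinatorFactory implements CoordinatorFactory {
  @Override
  public Coordinator getCoordinator(Map<String, Object> options) {
    // "options" carries the coordinator options sub-section of the YAML file.
    return new MyCoordinator(options); // MyCoordinator is a hypothetical Coordinator
  }
}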
Code example source: LiveRamp/hank

@Override
protected void validate() throws InvalidConfigurationException {
  checkNonEmptyConfiguration();
  getRequiredString(COORDINATOR_SECTION_KEY, COORDINATOR__FACTORY_KEY);
  getRequiredSection(COORDINATOR_SECTION_KEY, COORDINATOR__OPTIONS_KEY);
}
Code example source: LiveRamp/hank

public static CoordinatorConfigurator getConfigurator(int numPartitions) {
  if (numPartitions < 1) {
    throw new RuntimeException("Number of partitions must be > 0 instead of " + numPartitions);
  }
  YamlCoordinatorConfigurator configurator = new YamlCoordinatorConfigurator();
  try {
    configurator.loadFromYaml("coordinator:\n  factory: com.liveramp.hank.hadoop.test.MapStorageEngineCoordinator$Factory\n  options:\n    numPartitions: " + numPartitions + "\n    remote_domain_root: /tmp\n");
  } catch (InvalidConfigurationException e) {
    throw new RuntimeException(e);
  }
  return configurator;
}
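For readability, the inline YAML string above expands (with its escaped newlines and reconstructed indentation) to a document of this shape:

coordinator:
  factory: com.liveramp.hank.hadoop.test.MapStorageEngineCoordinator$Factory
  options:
    numPartitions: <numPartitions>
    remote_domain_root: /tmp

This is also the general shape of configuration file that YamlCoordinatorConfigurator expects: the validate() example above requires coordinator.factory as a string and coordinator.options as a section.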
Code example source: LiveRamp/hank

@Override
protected void validate() throws InvalidConfigurationException {
  super.validate();
  getRequiredSection(SMART_CLIENT_SECTION_KEY);
  getRequiredInteger(SMART_CLIENT_SECTION_KEY, NUM_WORKER_THREADS);
  getRequiredInteger(SMART_CLIENT_SECTION_KEY, SERVICE_PORT_KEY);
  getRequiredString(SMART_CLIENT_SECTION_KEY, RING_GROUP_NAME_KEY);
}
Code example source: LiveRamp/hank

public static Coordinator createCoordinator(String tmpDir,
                                            int zkPort,
                                            int sessionTimeout,
                                            String domainsRoot,
                                            String domainGroupsRoot,
                                            String ringGroupsRoot) throws IOException, InvalidConfigurationException {
  String tmpFile = tmpDir + "/" + UUID.randomUUID().toString();
  FileWriter fileWriter = new FileWriter(tmpFile);
  fileWriter.append(coordinatorConfig(zkPort, sessionTimeout, domainsRoot, domainGroupsRoot, ringGroupsRoot));
  fileWriter.close();
  CoordinatorConfigurator config = new YamlCoordinatorConfigurator(tmpFile);
  return config.createCoordinator();
}
Code example source: LiveRamp/hank

@Override
protected void validate() throws InvalidConfigurationException {
  super.validate();
  getRequiredSection(RING_GROUP_CONDUCTOR_SECTION_KEY);
  getRequiredString(RING_GROUP_CONDUCTOR_SECTION_KEY, RING_GROUP_NAME_KEY);
  getRequiredInteger(RING_GROUP_CONDUCTOR_SECTION_KEY, SLEEP_INTERVAL_KEY);
  getRequiredInteger(RING_GROUP_CONDUCTOR_SECTION_KEY, MIN_RING_FULLY_SERVING_OBSERVATIONS_KEY);
  getRequiredString(RING_GROUP_CONDUCTOR_SECTION_KEY, INITIAL_MODE_KEY);
}
Code example source: LiveRamp/hank

public static void main(String[] args) throws Exception {
  CommandLineChecker.check(args, new String[]{"web_ui_configuration_file_path", "monitor_configuration_file_path",
      "port", "log4j_config_file"}, WebUiServer.class);
  org.apache.log4j.Logger.getLogger("com.liveramp.hank").setLevel(Level.INFO);
  String clientConfigPath = args[0];
  String monitorConfigPath = args[1];
  int port = Integer.parseInt(args[2]);
  PropertyConfigurator.configure(args[3]);
  CoordinatorConfigurator webUiConfigurator = new YamlCoordinatorConfigurator(clientConfigPath);
  Coordinator coordinator = webUiConfigurator.createCoordinator();
  MonitorConfigurator monitorConfigurator = new YamlMonitorConfigurator(monitorConfigPath);
  start(monitorConfigurator, coordinator, port);
}
Code example source: LiveRamp/hank

@Override
public void validate() throws InvalidConfigurationException {
  super.validate();
  getRequiredSection(HANK_SMART_CLIENT_SECTION_KEY);
  getRequiredString(HANK_SMART_CLIENT_SECTION_KEY, RING_GROUP_NAME_KEY);
  getRequiredInteger(HANK_SMART_CLIENT_SECTION_KEY, NUM_CONNECTIONS_PER_HOST_KEY);
  getRequiredInteger(HANK_SMART_CLIENT_SECTION_KEY, QUERY_NUM_MAX_TRIES_KEY);
  getRequiredInteger(HANK_SMART_CLIENT_SECTION_KEY, TRY_LOCK_CONNECTION_TIMEOUT_MS_KEY);
  getRequiredInteger(HANK_SMART_CLIENT_SECTION_KEY, ESTABLISH_CONNECTION_TIMEOUT_MS_KEY);
  getRequiredInteger(HANK_SMART_CLIENT_SECTION_KEY, QUERY_TIMEOUT_MS_KEY);
  getRequiredInteger(HANK_SMART_CLIENT_SECTION_KEY, BULK_QUERY_TIMEOUT_MS_KEY);
}
Code example source: LiveRamp/hank

public static void main(String[] args) throws IOException, InvalidConfigurationException {
  CommandLineChecker.check(args, new String[]{
          "domain name", "version to compact number", "mapred.task.timeout", "config path", "jobjar"},
      HadoopDomainCompactor.class);
  String domainName = args[0];
  Integer versionToCompactNumber = Integer.valueOf(args[1]);
  Integer mapredTaskTimeout = Integer.valueOf(args[2]);
  CoordinatorConfigurator configurator = new YamlCoordinatorConfigurator(args[3]);
  String jobJar = args[4];
  DomainCompactorProperties properties =
      new DomainCompactorProperties(domainName, versionToCompactNumber, configurator);
  JobConf conf = new JobConf();
  conf.setJar(jobJar);
  conf.set("mapred.task.timeout", mapredTaskTimeout.toString());
  conf.setJobName(HadoopDomainCompactor.class.getSimpleName()
      + " Domain " + domainName + ", Version " + versionToCompactNumber);
  HadoopDomainCompactor compactor = new HadoopDomainCompactor(conf);
  LOG.info("Compacting Hank domain " + domainName + " version " + versionToCompactNumber
      + " with coordinator configuration " + configurator);
  compactor.buildHankDomain(properties, new IncrementalDomainVersionProperties.Base("Version "
      + versionToCompactNumber + " compacted"));
}
Code example source: LiveRamp/hank

public static void main(String[] args) throws IOException, InvalidConfigurationException {
  CommandLineChecker.check(args, new String[]{"configuration", "domain name", "domain version number"},
      RemoteDomainVersionDeletionHelper.class);
  String configurationPath = args[0];
  String domainName = args[1];
  Integer versionNumber = Integer.parseInt(args[2]);
  Coordinator coordinator = new YamlCoordinatorConfigurator(configurationPath).createCoordinator();
  Domain domain = coordinator.getDomain(domainName);
  if (domain == null) {
    throw new RuntimeException("Given domain was not found: " + domainName);
  }
  DomainVersion domainVersion = domain.getVersion(versionNumber);
  if (domainVersion == null) {
    throw new RuntimeException("Given version was not found: " + domainName + " version " + versionNumber);
  }
  LOG.info("Deleting remote data for domain " + domainName + " version " + versionNumber);
  domainVersion.setDefunct(true);
  domain.getStorageEngine().getRemoteDomainVersionDeleter(StorageEngine.RemoteLocation.DOMAIN_BUILDER).deleteVersion(versionNumber);
}
Code example source: LiveRamp/hank

@Override
protected void validate() throws InvalidConfigurationException {
  super.validate();
  getRequiredSection(PARTITION_SERVER_SECTION_KEY);
  getRequiredStringList(PARTITION_SERVER_SECTION_KEY, LOCAL_DATA_DIRS_KEY);
  getRequiredInteger(PARTITION_SERVER_SECTION_KEY, SERVICE_PORT_KEY);
  getRequiredString(PARTITION_SERVER_SECTION_KEY, RING_GROUP_NAME_KEY);
  getRequiredSection(PARTITION_SERVER_SECTION_KEY, PARTITION_SERVER_DAEMON_SECTION_KEY);
  getRequiredInteger(PARTITION_SERVER_SECTION_KEY, PARTITION_SERVER_DAEMON_SECTION_KEY,
      NUM_CONCURRENT_QUERIES_KEY);
  getRequiredInteger(PARTITION_SERVER_SECTION_KEY, PARTITION_SERVER_DAEMON_SECTION_KEY,
      NUM_CONCURRENT_GET_BULK_TASKS);
  getRequiredInteger(PARTITION_SERVER_SECTION_KEY, PARTITION_SERVER_DAEMON_SECTION_KEY,
      GET_BULK_TASK_SIZE);
  getRequiredInteger(PARTITION_SERVER_SECTION_KEY, PARTITION_SERVER_DAEMON_SECTION_KEY,
      GET_TIMER_AGGREGATOR_WINDOW_KEY);
  getRequiredInteger(PARTITION_SERVER_SECTION_KEY, PARTITION_SERVER_DAEMON_SECTION_KEY,
      BUFFER_REUSE_MAX_SIZE);
  getRequiredLong(PARTITION_SERVER_SECTION_KEY, PARTITION_SERVER_DAEMON_SECTION_KEY,
      CACHE_NUM_BYTES_CAPACITY);
  getRequiredLong(PARTITION_SERVER_SECTION_KEY, PARTITION_SERVER_DAEMON_SECTION_KEY,
      CACHE_NUM_ITEMS_CAPACITY);
  getRequiredSection(PARTITION_SERVER_SECTION_KEY, UPDATE_DAEMON_SECTION_KEY);
  getRequiredInteger(PARTITION_SERVER_SECTION_KEY, UPDATE_DAEMON_SECTION_KEY, NUM_CONCURRENT_UPDATES_KEY);
  getRequiredInteger(PARTITION_SERVER_SECTION_KEY, UPDATE_DAEMON_SECTION_KEY, MAX_CONCURRENT_UPDATES_PER_DATA_DIRECTORY_KEY);
}
Code example source: LiveRamp/hank

public static void main(String[] args) throws IOException, InvalidConfigurationException {
  CommandLineChecker.check(args, new String[]{"domain name", "config path", "jobjar", "input path", "output_path"},
      HadoopDomainBuilder.class);
  String domainName = args[0];
  CoordinatorConfigurator configurator = new YamlCoordinatorConfigurator(args[1]);
  String jobJar = args[2];
  String inputPath = args[3];
  String outputPath = args[4];
  DomainBuilderProperties properties = new DomainBuilderProperties(domainName, configurator).setOutputPath(outputPath);
  JobConf conf = new JobConf();
  conf.setJar(jobJar);
  conf.setJobName(HadoopDomainBuilder.class.getSimpleName()
      + " Domain " + domainName + ", Output path: " + outputPath);
  HadoopDomainBuilder builder = new HadoopDomainBuilder(conf, inputPath,
      SequenceFileInputFormat.class,
      DomainBuilderMapperDefault.class);
  LOG.info("Building Hank domain " + domainName + " from input " + inputPath
      + " and coordinator configuration " + configurator);
  // TODO: Create DomainVersionProperties
  throw new NotImplementedException("TODO: Create DomainVersionProperties");
  // builder.buildHankDomain(properties, null);
}