This article collects Java code examples of the kafka.utils.ZkUtils.<init>() constructor and shows how ZkUtils.<init>() is used in practice. The examples are drawn mainly from GitHub, Stack Overflow, Maven and similar sources, extracted from selected open-source projects, and should serve as useful reference material. Details of the ZkUtils.<init>() method:
Package path: kafka.utils.ZkUtils
Class name: ZkUtils
Method name: <init>
Description: none provided
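All of the examples below follow the same construction pattern: build a ZkClient and a ZkConnection for the same ZooKeeper connect string, then pass both to the ZkUtils constructor together with a flag saying whether the cluster uses secure (SASL/ACL-protected) ZooKeeper. The following is a minimal, self-contained sketch of that pattern; the connect string, timeouts, and class name are placeholders for illustration and are not taken from any of the quoted projects.

import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.ZkConnection;

public class ZkUtilsExample {
  public static void main(String[] args) {
    String zkConnect = "localhost:2181";   // placeholder ZooKeeper connect string
    int sessionTimeoutMs = 30000;          // placeholder timeouts
    int connectionTimeoutMs = 30000;
    boolean isSecure = false;              // true only for SASL/ACL-protected ZooKeeper

    // The ZkClient must be created with ZKStringSerializer so Kafka can read what it writes.
    ZkClient zkClient = new ZkClient(zkConnect, sessionTimeoutMs, connectionTimeoutMs,
        ZKStringSerializer$.MODULE$);
    ZkConnection zkConnection = new ZkConnection(zkConnect, sessionTimeoutMs);

    ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, isSecure);
    try {
      System.out.println("Topics in ZooKeeper: " + zkUtils.getAllTopics());
    } finally {
      zkUtils.close();   // closes the underlying ZkClient
    }
  }
}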
Code example source: origin: apache/incubator-gobblin
ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), false);
int partitions = ConfigUtils.getInt(config, KafkaWriterConfigurationKeys.PARTITION_COUNT, KafkaWriterConfigurationKeys.PARTITION_COUNT_DEFAULT);
int replication = ConfigUtils.getInt(config, KafkaWriterConfigurationKeys.REPLICATION_COUNT, KafkaWriterConfigurationKeys.PARTITION_COUNT_DEFAULT);
Code example source: origin: apache/drill
public static void createTopicHelper(final String topicName, final int partitions) {
  Properties topicProps = new Properties();
  topicProps.put(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime");
  topicProps.put(TopicConfig.RETENTION_MS_CONFIG, "-1");
  ZkUtils zkUtils = new ZkUtils(zkClient,
      new ZkConnection(embeddedKafkaCluster.getZkServer().getConnectionString()), false);
  AdminUtils.createTopic(zkUtils, topicName, partitions, 1,
      topicProps, RackAwareMode.Disabled$.MODULE$);
  org.apache.kafka.common.requests.MetadataResponse.TopicMetadata fetchTopicMetadataFromZk =
      AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils);
  logger.info("Topic Metadata: " + fetchTopicMetadataFromZk);
}
Code example source: origin: linkedin/cruise-control
public BrokerFailureDetector(KafkaCruiseControlConfig config,
                             LoadMonitor loadMonitor,
                             Queue<Anomaly> anomalies,
                             Time time,
                             KafkaCruiseControl kafkaCruiseControl) {
  String zkUrl = config.getString(KafkaCruiseControlConfig.ZOOKEEPER_CONNECT_CONFIG);
  ZkConnection zkConnection = new ZkConnection(zkUrl, 30000);
  _zkClient = new ZkClient(zkConnection, 30000, new ZkStringSerializer());
  // Do not support secure ZK at this point.
  _zkUtils = new ZkUtils(_zkClient, zkConnection, false);
  _failedBrokers = new HashMap<>();
  _failedBrokersZkPath = config.getString(KafkaCruiseControlConfig.FAILED_BROKERS_ZK_PATH_CONFIG);
  _loadMonitor = loadMonitor;
  _anomalies = anomalies;
  _time = time;
  _kafkaCruiseControl = kafkaCruiseControl;
  _allowCapacityEstimation = config.getBoolean(KafkaCruiseControlConfig.ANOMALY_DETECTION_ALLOW_CAPACITY_ESTIMATION_CONFIG);
}
Code example source: origin: apache/incubator-gobblin
ZKStringSerializer$.MODULE$);
boolean isSecureKafkaCluster = false;
ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(liveZookeeper), isSecureKafkaCluster);
Code example source: origin: apache/incubator-gobblin
ZKStringSerializer$.MODULE$);
boolean isSecureKafkaCluster = false;
ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), isSecureKafkaCluster);
Code example source: origin: apache/drill
@BeforeClass
public static void initKafka() throws Exception {
  synchronized (TestKafkaSuit.class) {
    if (initCount.get() == 0) {
      ZookeeperTestUtil.setZookeeperSaslTestConfigProps();
      System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, ClassLoader.getSystemResource(LOGIN_CONF_RESOURCE_PATHNAME).getFile());
      embeddedKafkaCluster = new EmbeddedKafkaCluster();
      Properties topicProps = new Properties();
      zkClient = new ZkClient(embeddedKafkaCluster.getZkServer().getConnectionString(), SESSION_TIMEOUT, CONN_TIMEOUT, ZKStringSerializer$.MODULE$);
      ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(embeddedKafkaCluster.getZkServer().getConnectionString()), false);
      AdminUtils.createTopic(zkUtils, TestQueryConstants.JSON_TOPIC, 1, 1, topicProps, RackAwareMode.Disabled$.MODULE$);
      org.apache.kafka.common.requests.MetadataResponse.TopicMetadata fetchTopicMetadataFromZk = AdminUtils
          .fetchTopicMetadataFromZk(TestQueryConstants.JSON_TOPIC, zkUtils);
      logger.info("Topic Metadata: " + fetchTopicMetadataFromZk);
      KafkaMessageGenerator generator = new KafkaMessageGenerator(embeddedKafkaCluster.getKafkaBrokerList(),
          StringSerializer.class);
      generator.populateJsonMsgIntoKafka(TestQueryConstants.JSON_TOPIC, NUM_JSON_MSG);
    }
    initCount.incrementAndGet();
    runningSuite = true;
  }
  logger.info("Initialized Embedded Zookeeper and Kafka");
}
Code example source: origin: apache/incubator-druid
String zkHosts = config.getZookeeperHosts();
zkClient = new ZkClient(zkHosts, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
zkUtils = new ZkUtils(zkClient, new ZkConnection(zkHosts, sessionTimeoutMs), false);
if (config.manageKafkaTopic()) {
  int numPartitions = 1;
Code example source: origin: apache/incubator-druid
String zkHosts = config.getZookeeperHosts();
zkClient = new ZkClient(zkHosts, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
zkUtils = new ZkUtils(zkClient, new ZkConnection(zkHosts, sessionTimeoutMs), false);
if (config.manageKafkaTopic()) {
  int numPartitions = 4;
Code example source: origin: confluentinc/kafka-streams-examples
/**
 * Delete a Kafka topic.
 *
 * @param topic The name of the topic.
 */
public void deleteTopic(String topic) {
  log.debug("Deleting topic {}", topic);
  ZkClient zkClient = new ZkClient(
      zookeeperConnect(),
      DEFAULT_ZK_SESSION_TIMEOUT_MS,
      DEFAULT_ZK_CONNECTION_TIMEOUT_MS,
      ZKStringSerializer$.MODULE$);
  boolean isSecure = false;
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure);
  AdminUtils.deleteTopic(zkUtils, topic);
  zkClient.close();
}
Code example source: origin: confluentinc/kafka-streams-examples
/**
 * Create a Kafka topic with the given parameters.
 *
 * @param topic       The name of the topic.
 * @param partitions  The number of partitions for this topic.
 * @param replication The replication factor for (partitions of) this topic.
 * @param topicConfig Additional topic-level configuration settings.
 */
public void createTopic(String topic,
                        int partitions,
                        int replication,
                        Properties topicConfig) {
  log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
      topic, partitions, replication, topicConfig);
  // Note: You must initialize the ZkClient with ZKStringSerializer. If you don't, then
  // createTopic() will only seem to work (it will return without error). The topic will exist in
  // only ZooKeeper and will be returned when listing topics, but Kafka itself does not create the
  // topic.
  ZkClient zkClient = new ZkClient(
      zookeeperConnect(),
      DEFAULT_ZK_SESSION_TIMEOUT_MS,
      DEFAULT_ZK_CONNECTION_TIMEOUT_MS,
      ZKStringSerializer$.MODULE$);
  boolean isSecure = false;
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), isSecure);
  AdminUtils.createTopic(zkUtils, topic, partitions, replication, topicConfig, RackAwareMode.Enforced$.MODULE$);
  zkClient.close();
}
Code example source: origin: apache/atlas
@VisibleForTesting
protected ZkUtils createZkUtils(Configuration atlasProperties) {
  String zkConnect = atlasProperties.getString("atlas.kafka.zookeeper.connect");
  int sessionTimeout = atlasProperties.getInt("atlas.kafka.zookeeper.session.timeout.ms", 400);
  int connectionTimeout = atlasProperties.getInt("atlas.kafka.zookeeper.connection.timeout.ms", 200);
  Tuple2<ZkClient, ZkConnection> zkClientAndConnection = ZkUtils.createZkClientAndConnection(
      zkConnect, sessionTimeout, connectionTimeout);
  return new ZkUtils(zkClientAndConnection._1(), zkClientAndConnection._2(), false);
}
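For comparison, pre-2.0 Kafka also exposes a ZkUtils.apply(...) factory that bundles these two steps (createZkClientAndConnection followed by the constructor call) into one. The sketch below is an illustration using placeholder connection settings, not code from the quoted project.

import kafka.utils.ZkUtils;
import org.apache.kafka.common.security.JaasUtils;

public class ZkUtilsFactoryExample {
  public static void main(String[] args) {
    // Equivalent to ZkUtils.createZkClientAndConnection(...) plus new ZkUtils(...);
    // "localhost:2181" and the 30s timeouts are placeholders.
    ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, JaasUtils.isZkSecurityEnabled());
    zkUtils.close();
  }
}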
Code example source: origin: pinterest/doctorkafka
public static ZkUtils getZkUtils(String zkUrl) {
  if (!zkUtilsMap.containsKey(zkUrl)) {
    Tuple2<ZkClient, ZkConnection> zkClientAndConnection =
        ZkUtils.createZkClientAndConnection(zkUrl, 30000, 3000000);
    ZkUtils zkUtils = new ZkUtils(zkClientAndConnection._1(), zkClientAndConnection._2(), true);
    zkUtilsMap.put(zkUrl, zkUtils);
  }
  return zkUtilsMap.get(zkUrl);
}
Code example source: origin: org.apache.atlas/atlas-notification
@VisibleForTesting
protected ZkUtils createZkUtils(Configuration atlasProperties) {
  String zkConnect = atlasProperties.getString("atlas.kafka.zookeeper.connect");
  int sessionTimeout = atlasProperties.getInt("atlas.kafka.zookeeper.session.timeout.ms", 400);
  int connectionTimeout = atlasProperties.getInt("atlas.kafka.zookeeper.connection.timeout.ms", 200);
  Tuple2<ZkClient, ZkConnection> zkClientAndConnection = ZkUtils.createZkClientAndConnection(
      zkConnect, sessionTimeout, connectionTimeout);
  return new ZkUtils(zkClientAndConnection._1(), zkClientAndConnection._2(), false);
}
Code example source: origin: pinterest/doctorkafka
public static ZkUtils getZkUtils(String zkUrl) {
  if (!zkUtilsMap.containsKey(zkUrl)) {
    Tuple2<ZkClient, ZkConnection> zkClientAndConnection =
        ZkUtils.createZkClientAndConnection(zkUrl, 30000, 3000000);
    ZkUtils zkUtils = new ZkUtils(zkClientAndConnection._1(), zkClientAndConnection._2(), true);
    zkUtilsMap.put(zkUrl, zkUtils);
  }
  return zkUtilsMap.get(zkUrl);
}
Code example source: origin: com.github.pinterest/kafkastats
public static ZkUtils getZkUtils(String zkUrl) {
  if (!zkUtilsMap.containsKey(zkUrl)) {
    Tuple2<ZkClient, ZkConnection> zkClientAndConnection =
        ZkUtils.createZkClientAndConnection(zkUrl, 30000, 3000000);
    ZkUtils zkUtils = new ZkUtils(zkClientAndConnection._1(), zkClientAndConnection._2(), true);
    zkUtilsMap.put(zkUrl, zkUtils);
  }
  return zkUtilsMap.get(zkUrl);
}
Code example source: origin: vakinge/jeesuite-libs
public ZkConsumerCommand(ZkClient zkClient, String zkServers, String kafkaServers) {
  kafkaServerList.addAll(Arrays.asList(kafkaServers.split(",")));
  if (zkClient == null) {
    zkClient = new ZkClient(zkServers, 10000, 10000, new ZKStringSerializer());
  }
  this.zkClient = zkClient;
  boolean isSecureKafkaCluster = false;
  zkUtils = new ZkUtils(zkClient, new ZkConnection(zkServers), isSecureKafkaCluster);
}
Code example source: origin: com.cerner.common.kafka/common-kafka-test
/**
 * Startup Zookeeper.
 *
 * @throws IOException if an error occurs during Zookeeper initialization.
 */
public void setUp() throws IOException {
  zookeeper = new EmbeddedZookeeper(zookeeperConnect);
  ZkClient zkClient = new ZkClient(zookeeperConnect, zkSessionTimeout, zkConnectionTimeout, ZKStringSerializer$.MODULE$);
  ZkConnection connection = new ZkConnection(zookeeperConnect, zkSessionTimeout);
  zkUtils = new ZkUtils(zkClient, connection, false);
}
Code example source: origin: apache/atlas
public KafkaBridge(Configuration atlasConf, AtlasClientV2 atlasClientV2) throws Exception {
  String zookeeperConnect = getZKConnection(atlasConf);
  int sessionTimeOutMs = atlasConf.getInt(ZOOKEEPER_SESSION_TIMEOUT_MS, DEFAULT_ZOOKEEPER_SESSION_TIMEOUT_MS);
  int connectionTimeOutMs = atlasConf.getInt(ZOOKEEPER_CONNECTION_TIMEOUT_MS, DEFAULT_ZOOKEEPER_CONNECTION_TIMEOUT_MS);
  ZkClient zkClient = new ZkClient(zookeeperConnect, sessionTimeOutMs, connectionTimeOutMs, ZKStringSerializer$.MODULE$);
  this.atlasClientV2 = atlasClientV2;
  this.clusterName = atlasConf.getString(KAFKA_CLUSTER_NAME, DEFAULT_CLUSTER_NAME);
  this.zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), JaasUtils.isZkSecurityEnabled());
  this.availableTopics = scala.collection.JavaConversions.seqAsJavaList(zkUtils.getAllTopics());
}
Code example source: origin: vakinge/jeesuite-libs
public ZkConsumerCommand(String zkServers, String kafkaServers) {
  kafkaServerList.addAll(Arrays.asList(kafkaServers.split(",")));
  zkClient = new ZkClient(zkServers, 10000, 10000, new ZKStringSerializer());
  boolean isSecureKafkaCluster = false;
  zkUtils = new ZkUtils(zkClient, new ZkConnection(zkServers), isSecureKafkaCluster);
}
Code example source: origin: OpenNMS/opennms
private List<BrokerMetadata> getBrokerMetadatas() {
  ZkClient zkClient = new ZkClient(getZookeeperConnectString(), 1000, 1000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(getZookeeperConnectString()), false);
  return JavaConversions.seqAsJavaList(AdminUtils.getBrokerMetadatas(zkUtils, Enforced$.MODULE$, Option.empty()));
}