org.apache.storm.utils.Utils.readDefaultConfig()方法的使用及代码示例

x33g5p2x  于2022-02-01 转载在 其他  
字(11.6k)|赞(0)|评价(0)|浏览(149)

本文整理了Java中org.apache.storm.utils.Utils.readDefaultConfig()方法的一些代码示例,展示了Utils.readDefaultConfig()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Utils.readDefaultConfig()方法的具体详情如下:
包路径:org.apache.storm.utils.Utils
类名称:Utils
方法名:readDefaultConfig

Utils.readDefaultConfig介绍

暂无

代码示例

代码示例来源:origin: apache/storm

/**
 * Builds a configuration map: Storm's built-in defaults overlaid with the
 * supplied entries (extras win on key collisions).
 *
 * @param extra entries that override the defaults
 * @return the merged configuration map
 */
private Map<String, Object> mkConf(Map<String, Object> extra) {
    Map<String, Object> merged = Utils.readDefaultConfig();
    merged.putAll(extra);
    return merged;
}

代码示例来源:origin: apache/storm

/**
 * Reads the full Storm configuration: built-in defaults, then the config file
 * (storm.yaml, or the file named by the "storm.conf.file" system property),
 * then command-line overrides. Later sources take precedence.
 *
 * @return the merged configuration map
 */
public static Map<String, Object> readStormConfig() {
    Map<String, Object> conf = readDefaultConfig();
    String fileFromProperty = System.getProperty("storm.conf.file");
    boolean useDefaultFile = (fileFromProperty == null || fileFromProperty.equals(""));
    Map<String, Object> fileConf = useDefaultFile
        ? findAndReadConfigFile("storm.yaml", false)
        : findAndReadConfigFile(fileFromProperty, true);
    conf.putAll(fileConf);
    conf.putAll(readCommandLineOpts());
    return conf;
}

代码示例来源:origin: apache/storm

/**
 * Initialize a fake config: stock defaults pointed at the test Nimbus
 * host and thrift port.
 *
 * @return conf seeded for the fake cluster
 */
private static Config initializedConfig() {
    Config conf = new Config();
    conf.putAll(Utils.readDefaultConfig());
    ArrayList<String> seeds = new ArrayList<>();
    seeds.add(NIMBUS_HOST);
    conf.put(Config.NIMBUS_SEEDS, seeds);
    conf.put(Config.NIMBUS_THRIFT_PORT, NIMBUS_PORT);
    return conf;
}

代码示例来源:origin: apache/storm

// Start each test from the stock defaults; retries are disabled so a nimbus
// client failure surfaces immediately instead of being retried.
@Before
public void setup() {
  conf = Utils.readDefaultConfig();
  conf.put(Config.STORM_NIMBUS_RETRY_TIMES, 0);
}

代码示例来源:origin: apache/storm

// Configuring a tuple serializer class that cannot be loaded must fail fast:
// building the Kryo instance is expected to throw a RuntimeException.
@Test(expected = RuntimeException.class)
public void test_throws_runtimeexception_when_no_such_class() {
  Map<String, Object> conf = Utils.readDefaultConfig();
  conf.put(Config.TOPOLOGY_TUPLE_SERIALIZER, "null.this.class.does.not.exist");
  SerializationFactory.getKryo(conf);
}

代码示例来源:origin: apache/storm

/**
 * A valid serializer class configured under TOPOLOGY_TUPLE_SERIALIZER must be
 * registered by the factory as the serializer for ListDelegate.
 */
@Test
public void test_registers_when_valid_class_name() {
    // Class<?> instead of the raw Class type: avoids an unchecked/raw-type
    // warning without changing behavior.
    Class<?> arbitraryClass = BlowfishTupleSerializer.class;
    String secretKey = "0123456789abcdef";
    Map<String, Object> conf = Utils.readDefaultConfig();
    conf.put(Config.TOPOLOGY_TUPLE_SERIALIZER, arbitraryClass.getName());
    // Blowfish serializer requires a secret key in the config.
    conf.put(BlowfishTupleSerializer.SECRET_KEY, secretKey);
    Kryo kryo = SerializationFactory.getKryo(conf);
    Assert.assertEquals(arbitraryClass, kryo.getSerializer(ListDelegate.class).getClass());
}

代码示例来源:origin: apache/storm

/**
 * With no serializer override in the config, the factory must register the
 * class named by TOPOLOGY_TUPLE_SERIALIZER in Storm's defaults.
 */
@Test
public void test_registers_default_when_not_in_conf() throws ClassNotFoundException {
    Map<String, Object> conf = Utils.readDefaultConfig();
    String className = (String) conf.get(Config.TOPOLOGY_TUPLE_SERIALIZER);
    // Class<?> instead of the raw Class type: avoids a raw-type warning
    // without changing behavior.
    Class<?> configuredClass = Class.forName(className);
    Kryo kryo = SerializationFactory.getKryo(conf);
    Assert.assertEquals(configuredClass, kryo.getSerializer(ListDelegate.class).getClass());
}

代码示例来源:origin: apache/storm

/**
 * Builds a cluster Config with resource-aware-scheduler defaults and the
 * given per-component resource settings.
 *
 * @param compPcore   CPU (pcore percent) per component
 * @param compOnHeap  on-heap memory (MB) per component
 * @param compOffHeap off-heap memory (MB) per component
 * @param pools       optional scheduler user pools; skipped when null
 * @return the assembled Config
 */
public static Config createClusterConfig(double compPcore, double compOnHeap, double compOffHeap,
                     Map<String, Map<String, Number>> pools) {
    Config conf = new Config();
    conf.putAll(Utils.readDefaultConfig());
    conf.put(Config.STORM_NETWORK_TOPOGRAPHY_PLUGIN, GenSupervisorsDnsToSwitchMapping.class.getName());
    conf.put(DaemonConfig.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, DefaultSchedulingPriorityStrategy.class.getName());
    conf.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, DefaultResourceAwareStrategy.class.getName());
    conf.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, compPcore);
    conf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, compOffHeap);
    conf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, compOnHeap);
    if (pools != null) {
        conf.put(DaemonConfig.RESOURCE_AWARE_SCHEDULER_USER_POOLS, pools);
    }
    return conf;
}

代码示例来源:origin: apache/storm

/**
 * newCurator must install a StormBoundedExponentialBackoffRetry policy wired
 * from the zookeeper retry settings in the config.
 */
@Test
public void newCuratorUsesExponentialBackoffTest() throws InterruptedException {
    final int expectedInterval = 2400;
    final int expectedRetries = 10;
    final int expectedCeiling = 3000;
    Map<String, Object> config = Utils.readDefaultConfig();
    config.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, expectedInterval);
    config.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, expectedRetries);
    config.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING, expectedCeiling);
    // The server list is bogus on purpose: only the retry policy object is
    // inspected; no connection is ever attempted.
    CuratorFramework curator = CuratorUtils.newCurator(config, Arrays.asList("bogus_server"), 42, "",
                              DaemonType.WORKER.getDefaultZkAcls(config));
    StormBoundedExponentialBackoffRetry policy =
        (StormBoundedExponentialBackoffRetry) curator.getZookeeperClient().getRetryPolicy();
    // JUnit convention is assertEquals(expected, actual); the original had the
    // arguments reversed, which produces misleading failure messages.
    Assert.assertEquals(expectedInterval, policy.getBaseSleepTimeMs());
    Assert.assertEquals(expectedRetries, policy.getN());
    Assert.assertEquals(expectedCeiling, policy.getSleepTimeMs(10, 0));
}

代码示例来源:origin: apache/storm

// NOTE(review): truncated excerpt — these lines are the setup portion of a
// larger blacklist-scheduler test; the enclosing method is not shown here.
INimbus iNimbus = new TestUtilsForBlacklistScheduler.INimbusTest();
Config config = new Config();
config.putAll(Utils.readDefaultConfig());
// Shrink the tolerance window and count so the blacklist triggers quickly.
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME, 200);
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT, 2);

代码示例来源:origin: apache/storm

/**
 * Submitting topologies to a local Nimbus must invoke the configured
 * submitter hook, which records each submission in order.
 */
@Test
public void testSubmitTopologyToLocalNimbus() throws Exception {
  int port = Utils.getAvailablePort();
  // try-with-resources ensures the local cluster is torn down on exit.
  try (ILocalCluster localCluster = new LocalCluster.Builder()
    .withNimbusDaemon(true)
    .withDaemonConf(Config.NIMBUS_THRIFT_PORT, port)
    .build()) {
    Config topoConf = new Config();
    topoConf.putAll(Utils.readDefaultConfig());
    topoConf.setDebug(true);
    topoConf.put("storm.cluster.mode", "local"); // default is always "distributed" but here local cluster is being used.
    topoConf.put(Config.STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN, InmemoryTopologySubmitterHook.class.getName());
    topoConf.put(Config.NIMBUS_THRIFT_PORT, port);
    List<TopologyDetails> topologyNames = new ArrayList<>();
    // Submit four uniquely-named topologies and remember them in order.
    for (int i = 0; i < 4; i++) {
      final String topologyName = "word-count-" + UUID.randomUUID().toString();
      final StormTopology stormTopology = createTestTopology();
      topologyNames.add(new TopologyDetails(topologyName, stormTopology));
      localCluster.submitTopology(topologyName, topoConf, stormTopology);
    }
    // The hook must have observed all four submissions in submission order.
    Assert.assertEquals(InmemoryTopologySubmitterHook.submittedTopologies, topologyNames);
  }
}

代码示例来源:origin: apache/storm

// NOTE(review): truncated excerpt — setup fragment of a blacklist-scheduler
// test; the enclosing method and the `config` declaration are not shown.
config.putAll(Utils.readDefaultConfig());
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME,200);
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT,2);

代码示例来源:origin: apache/storm

// NOTE(review): truncated excerpt — only the opening lines of this test are
// shown; the rest of the body and the closing brace are missing.
public void testSetupAndTearDown() throws IOException {
  Config config = new Config();
  config.putAll(Utils.readDefaultConfig());

代码示例来源:origin: apache/storm

/**
 * When the same slot (sup-0, port 0) repeatedly disappears between scheduling
 * rounds, the blacklist scheduler must blacklist that supervisor's host once
 * the tolerance count is exceeded — even after the slot comes back.
 */
@Test
public void TestBadSlot() {
    INimbus iNimbus = new TestUtilsForBlacklistScheduler.INimbusTest();
    Map<String, SupervisorDetails> supMap = TestUtilsForBlacklistScheduler.genSupervisors(3, 4);
    Config config = new Config();
    config.putAll(Utils.readDefaultConfig());
    // Small tolerance window/count so the blacklist triggers within a few rounds.
    config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME, 200);
    config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT, 2);
    config.put(DaemonConfig.BLACKLIST_SCHEDULER_RESUME_TIME, 300);
    // Diamond operator instead of spelled-out type arguments, matching the
    // sibling TestBadSupervisor test; behavior is unchanged.
    Map<String, TopologyDetails> topoMap = new HashMap<>();
    TopologyDetails topo1 = TestUtilsForBlacklistScheduler.getTopology("topo-1", config, 5, 15, 1, 1, currentTime - 2, true);
    topoMap.put(topo1.getId(), topo1);
    Topologies topologies = new Topologies(topoMap);
    StormMetricsRegistry metricsRegistry = new StormMetricsRegistry();
    ResourceMetrics resourceMetrics = new ResourceMetrics(metricsRegistry);
    Cluster cluster = new Cluster(iNimbus, resourceMetrics, supMap, new HashMap<>(), topologies, config);
    BlacklistScheduler bs = new BlacklistScheduler(new DefaultScheduler(), metricsRegistry);
    bs.prepare(config);
    // Round 1: healthy cluster.
    bs.schedule(topologies, cluster);
    // Rounds 2-3: slot (sup-0, port 0) is missing, exceeding the tolerance count.
    cluster = new Cluster(iNimbus, resourceMetrics, TestUtilsForBlacklistScheduler.removePortFromSupervisors(supMap, "sup-0", 0), TestUtilsForBlacklistScheduler.assignmentMapToImpl(cluster.getAssignments()), topologies, config);
    bs.schedule(topologies, cluster);
    cluster = new Cluster(iNimbus, resourceMetrics, TestUtilsForBlacklistScheduler.removePortFromSupervisors(supMap, "sup-0", 0), TestUtilsForBlacklistScheduler.assignmentMapToImpl(cluster.getAssignments()), topologies, config);
    bs.schedule(topologies, cluster);
    // Round 4: the slot is back, but the host must already be blacklisted.
    cluster = new Cluster(iNimbus, resourceMetrics, supMap, new HashMap<>(), topologies, config);
    bs.schedule(topologies, cluster);
    Assert.assertEquals("blacklist", Collections.singleton("host-0"), cluster.getBlacklistedHosts());
}

代码示例来源:origin: apache/storm

// NOTE(review): truncated excerpt — setup fragment of a blacklist-scheduler
// test; the enclosing method and the `config` declaration are not shown.
config.putAll(Utils.readDefaultConfig());
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME, 200);
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT, 2);

代码示例来源:origin: apache/metron

/**
 * Reads the Storm configuration — built-in defaults overlaid with storm.yaml
 * (or the file named by the "storm.conf.file" system property) — WITHOUT
 * applying command-line overrides, unlike Utils.readStormConfig().
 *
 * @return the merged configuration map
 */
private Map<String, Object> readStormConfigWithoutCLI() {
    // Parameterized Map instead of the raw type: removes unchecked warnings
    // without changing behavior.
    Map<String, Object> ret = Utils.readDefaultConfig();
    String confFile = System.getProperty("storm.conf.file");
    Map<String, Object> storm;
    if (confFile == null || confFile.equals("")) {
        storm = Utils.findAndReadConfigFile("storm.yaml", false);
    } else {
        storm = Utils.findAndReadConfigFile(confFile, true);
    }
    ret.putAll(storm);
    return ret;
}

代码示例来源:origin: apache/storm

// NOTE(review): truncated excerpt — setup fragment of a blacklist-scheduler
// test; the enclosing method and the `config` declaration are not shown.
config.putAll(Utils.readDefaultConfig());
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME, 200);
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT, 2);

代码示例来源:origin: apache/storm

/**
 * When an entire supervisor (sup-0) repeatedly disappears between scheduling
 * rounds, the blacklist scheduler must blacklist its host once the tolerance
 * count is exceeded — even after the supervisor comes back.
 */
@Test
public void TestBadSupervisor() {
  INimbus iNimbus = new TestUtilsForBlacklistScheduler.INimbusTest();
  Map<String, SupervisorDetails> supMap = TestUtilsForBlacklistScheduler.genSupervisors(3, 4);
  Config config = new Config();
  config.putAll(Utils.readDefaultConfig());
  // Small tolerance window/count so the blacklist triggers within a few rounds.
  config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME, 200);
  config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT, 2);
  config.put(DaemonConfig.BLACKLIST_SCHEDULER_RESUME_TIME, 300);
  Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
  TopologyDetails topo1 = TestUtilsForBlacklistScheduler.getTopology("topo-1", config, 5, 15, 1, 1, currentTime - 2, true);
  topoMap.put(topo1.getId(), topo1);
  Topologies topologies = new Topologies(topoMap);
  StormMetricsRegistry metricsRegistry = new StormMetricsRegistry();
  ResourceMetrics resourceMetrics = new ResourceMetrics(metricsRegistry);
  Cluster cluster = new Cluster(iNimbus, resourceMetrics, supMap, new HashMap<>(), topologies, config);
  BlacklistScheduler bs = new BlacklistScheduler(new DefaultScheduler(), metricsRegistry);
  bs.prepare(config);
  // Round 1: healthy cluster.
  bs.schedule(topologies, cluster);
  // Rounds 2-3: sup-0 is missing, exceeding the tolerance count.
  cluster = new Cluster(iNimbus, resourceMetrics, TestUtilsForBlacklistScheduler.removeSupervisorFromSupervisors(supMap, "sup-0"), TestUtilsForBlacklistScheduler.assignmentMapToImpl(cluster.getAssignments()), topologies, config);
  bs.schedule(topologies, cluster);
  cluster = new Cluster(iNimbus, resourceMetrics, TestUtilsForBlacklistScheduler.removeSupervisorFromSupervisors(supMap, "sup-0"), TestUtilsForBlacklistScheduler.assignmentMapToImpl(cluster.getAssignments()), topologies, config);
  bs.schedule(topologies, cluster);
  // Round 4: sup-0 is back, but its host must already be blacklisted.
  cluster = new Cluster(iNimbus, resourceMetrics, supMap, TestUtilsForBlacklistScheduler.assignmentMapToImpl(cluster.getAssignments()), topologies, config);
  bs.schedule(topologies, cluster);
  Assert.assertEquals("blacklist", Collections.singleton("host-0"), cluster.getBlacklistedHosts());
}

代码示例来源:origin: org.apache.storm/storm-core

/**
 * Reads the full Storm configuration: built-in defaults, then the config file
 * (storm.yaml, or the file named by the "storm.conf.file" system property),
 * then command-line overrides. Later sources take precedence.
 *
 * @return the merged configuration map
 */
public static Map<String, Object> readStormConfig() {
  Map<String, Object> ret = readDefaultConfig();
  String confFile = System.getProperty("storm.conf.file");
  Map<String, Object> storm;
  if (confFile == null || confFile.equals("")) {
    // No explicit file configured: fall back to storm.yaml (boolean flag
    // presumably means "must exist" — confirm against findAndReadConfigFile).
    storm = findAndReadConfigFile("storm.yaml", false);
  } else {
    storm = findAndReadConfigFile(confFile, true);
  }
  ret.putAll(storm);
  ret.putAll(readCommandLineOpts());
  return ret;
}

代码示例来源:origin: DigitalPebble/storm-crawler

/**
 * Round-trips a Metadata instance through Storm's Kryo value
 * serializer/deserializer and back without errors.
 */
@Test
public void testSerialization() throws IOException {
    // Parameterized Map instead of the raw type: removes unchecked warnings
    // without changing behavior.
    Map<String, Object> conf = Utils.readDefaultConfig();
    Config.registerSerialization(conf, Metadata.class);

    KryoValuesSerializer kvs = new KryoValuesSerializer(conf);
    Metadata md = new Metadata();
    byte[] content = kvs.serializeObject(md);

    KryoValuesDeserializer kvd = new KryoValuesDeserializer(conf);
    Metadata md2 = (Metadata) kvd.deserializeObject(content);

    // TODO compare md and md2 field-by-field instead of relying only on
    // the round trip not throwing.
}
}

相关文章

Utils类方法