java.util.concurrent.atomic.AtomicReference.getAndUpdate()方法的使用及代码示例

x33g5p2x  于2022-01-15 转载在 其他  
字(10.5k)|赞(0)|评价(0)|浏览(138)

本文整理了Java中java.util.concurrent.atomic.AtomicReference.getAndUpdate()方法的一些代码示例,展示了AtomicReference.getAndUpdate()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。AtomicReference.getAndUpdate()方法的具体详情如下:
包路径:java.util.concurrent.atomic.AtomicReference
类名称:AtomicReference
方法名:getAndUpdate

AtomicReference.getAndUpdate介绍

暂无

代码示例

代码示例来源:origin: neo4j/neo4j

/**
 * Stops auto-indexing the given property key.
 * <p>
 * Copy-on-write update of the shared key set: the update function builds
 * a fresh set so concurrent readers of the {@code AtomicReference} never
 * observe a partially modified collection. {@code getAndUpdate} may
 * re-apply the function under contention, which is safe here because the
 * function is side-effect-free.
 *
 * @param propName the property key to exclude from auto-indexing
 */
@Override
public void stopAutoIndexingProperty( String propName )
{
  propertyKeysToInclude.getAndUpdate( current ->
  {
    // Copy constructor replaces the two-step new HashSet<>() + addAll()
    // and pre-sizes the backing table for the source collection.
    Set<String> updated = new HashSet<>( current );
    updated.remove( propName );
    return updated;
  });
}

代码示例来源:origin: neo4j/neo4j

/**
 * Starts auto-indexing the given property key.
 * <p>
 * Copy-on-write update of the shared key set: the update function builds
 * a fresh set so concurrent readers of the {@code AtomicReference} never
 * observe a partially modified collection. {@code getAndUpdate} may
 * re-apply the function under contention, which is safe here because the
 * function is side-effect-free.
 *
 * @param propName the property key to include in auto-indexing
 */
@Override
public void startAutoIndexingProperty( String propName )
{
  propertyKeysToInclude.getAndUpdate( current ->
  {
    // Copy constructor replaces the two-step new HashSet<>() + addAll()
    // and pre-sizes the backing table for the source collection.
    Set<String> updated = new HashSet<>( current );
    updated.add( propName );
    return updated;
  });
}

代码示例来源:origin: apache/storm

/**
 * Returns the executor set for the given topology, computing and caching
 * it on a cache miss.
 * <p>
 * NOTE(review): this is a check-then-act on the cached map; a concurrent
 * caller may compute the same value twice. The getAndUpdate itself is
 * atomic, so the cache stays consistent either way — confirm duplicate
 * computation is acceptable here.
 *
 * @param topoId   topology id used as the cache key
 * @param base     topology base metadata passed through to computeExecutors
 * @param topoConf topology configuration passed through to computeExecutors
 * @param topology the topology structure passed through to computeExecutors
 * @return the cached or freshly computed executor set
 */
private Set<List<Integer>> getOrUpdateExecutors(String topoId, StormBase base, Map<String, Object> topoConf,
                        StormTopology topology)
  throws IOException, AuthorizationException, InvalidTopologyException, KeyNotFoundException {
  // Fast path: serve from the cached id -> executors map.
  Set<List<Integer>> executors = idToExecutors.get().get(topoId);
  if (null == executors) {
    executors = new HashSet<>(computeExecutors(topoId, base, topoConf, topology));
    // Assoc is applied atomically via getAndUpdate; presumably it yields a
    // copy of the map with (topoId -> executors) added — Assoc is defined
    // elsewhere in the project.
    idToExecutors.getAndUpdate(new Assoc<>(topoId, executors));
  }
  return executors;
}

代码示例来源:origin: micronaut-projects/micronaut-core

/**
 * Closes any file related access if the upload is on
 * disk and releases the buffer for the file.
 */
void destroy() {
  fileAccess.getAndUpdate(channel -> {
    if (channel != null) {
      try {
        channel.close();
      } catch (IOException e) {
        LOG.warn("Error closing file channel for disk file upload", e);
      }
    }
    return null;
  });
  data.release();
}

代码示例来源:origin: runelite/runelite

/**
 * Replaces custom {@code <colTYPE>} formatting tags in a chat message with
 * real color tags for the given message type.
 *
 * @param transparent whether to use the colors configured for the
 *                    transparent chatbox variant
 * @param message     the raw message containing custom color tags
 * @param messageType the chat message type whose color set applies
 * @return the message with custom tags replaced by actual color tags
 */
private String recolorMessage(boolean transparent, String message, ChatMessageType messageType)
{
	final Collection<ChatColor> chatColors = colorCache.get(messageType);
	// AtomicReference is used only as a mutable holder so the lambda below
	// can accumulate replacements; this method is otherwise sequential.
	final AtomicReference<String> resultMessage = new AtomicReference<>(message);
	// Replace custom formatting with actual colors
	chatColors.stream()
		.filter(chatColor -> chatColor.isTransparent() == transparent)
		.forEach(chatColor ->
			// replaceAll treats the first argument as a regex; the tag text
			// is an enum name here, so no escaping is applied.
			resultMessage.getAndUpdate(oldMessage -> oldMessage.replaceAll(
				"<col" + chatColor.getType().name() + ">",
				ColorUtil.colorTag(chatColor.getColor()))));
	return resultMessage.get();
}

代码示例来源:origin: ehcache/ehcache3

/**
 * OSGi bundle activator entry point.
 * <p>
 * Atomically claims the CORE_BUNDLE slot for this bundle context: the
 * update function installs {@code context} only if no context is already
 * recorded. A {@code null} previous value means we won the claim and can
 * configure service loading; a non-null value means another bundle
 * instance is already running against the same core classes, which is
 * a fatal configuration error.
 */
@Override
public void start(BundleContext context) throws Exception {
 BundleContext currentContext = CORE_BUNDLE.getAndUpdate(current -> current == null ? context : current);
 if (currentContext == null) {
  String greeting = "Detected OSGi Environment (core is in bundle: " + context.getBundle() + ")";
  // The OSGI_LOADING property explicitly opts OUT of OSGi-based service
  // loading when set to "false"; any other value (or absence) enables it.
  if ("false".equalsIgnoreCase(context.getProperty(OSGI_LOADING))) {
   SafeOsgi.disableOSGiServiceLoading();
   LOGGER.info(greeting + ": OSGi Based Service Loading Disabled Via System/Framework Property - Extensions Outside This Bundle Will Not Be Detected");
   LOGGER.debug("JDK Service Loading Sees:\n\t" + stream(spliterator(ClassLoading.servicesOfType(ServiceFactory.class).iterator(), Long.MAX_VALUE, 0), false)
    .map(sf -> sf.getServiceType().getName()).collect(joining("\n\t")));
  } else {
   SafeOsgi.enableOSGiServiceLoading();
   LOGGER.info(greeting + ": Using OSGi Based Service Loading");
  }
 } else {
  throw new IllegalStateException("Multiple bundle instances running against the same core classes: existing bundle: " + currentContext.getBundle() + " new bundle: " + context.getBundle());
 }
}

代码示例来源:origin: apache/storm

/**
 * Returns the per-worker-slot resources for a topology, consulting the
 * cache first and falling back to cluster-state assignment info.
 * <p>
 * NOTE(review): on a cache miss with no assignment (or no worker
 * resources set), the empty map is returned but NOT cached, so the
 * cluster state is re-queried on every call — confirm that is intended.
 *
 * @param topoId topology id to look up
 * @return map of worker slot to its resources; empty when no assignment
 *         with worker resources exists
 */
private Map<WorkerSlot, WorkerResources> getWorkerResourcesForTopology(String topoId) {
  Map<WorkerSlot, WorkerResources> ret = idToWorkerResources.get().get(topoId);
  if (ret == null) {
    IStormClusterState state = stormClusterState;
    ret = new HashMap<>();
    Assignment assignment = state.assignmentInfo(topoId, null);
    if (assignment != null && assignment.is_set_worker_resources()) {
      for (Entry<NodeInfo, WorkerResources> entry : assignment.get_worker_resources().entrySet()) {
        NodeInfo ni = entry.getKey();
        // Each NodeInfo is mapped to a slot using its first port only.
        WorkerSlot slot = new WorkerSlot(ni.get_node(), ni.get_port_iterator().next());
        ret.put(slot, entry.getValue());
      }
      // Cache the computed map atomically; Assoc is defined elsewhere.
      idToWorkerResources.getAndUpdate(new Assoc<>(topoId, ret));
    }
  }
  return ret;
}

代码示例来源:origin: apache/storm

cachedNodeToPortSocket.getAndUpdate(prev -> {
  Map<NodeInfo, IConnection> next = new HashMap<>(prev);
  for (NodeInfo nodeInfo : newConnections) {
cachedNodeToPortSocket.getAndUpdate(prev -> {
  Map<NodeInfo, IConnection> next = new HashMap<>(prev);
  removeConnections.forEach(next::remove);

代码示例来源:origin: apache/storm

/**
 * Resets any logger whose temporary log-level override has expired back
 * to its configured reset level, and removes the expired entries from the
 * latest log config, committing the logger changes at the end.
 */
public void resetLogLevels() {
  TreeMap<String, LogLevel> latestLogLevelMap = latestLogConfig.get();
  LOG.debug("Resetting log levels: Latest log config is {}", latestLogLevelMap);
  LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false);
  // Iterate over a snapshot; removals below go through getAndUpdate on a
  // fresh copy, so this loop never mutates the map it walks.
  for (String loggerName : latestLogLevelMap.descendingKeySet()) {
    LogLevel loggerSetting = latestLogLevelMap.get(loggerName);
    long timeout = loggerSetting.get_reset_log_level_timeout_epoch();
    String resetLogLevel = loggerSetting.get_reset_log_level();
    if (timeout < Time.currentTimeMillis()) {
      LOG.info("{}: Resetting level to {}", loggerName, resetLogLevel);
      setLoggerLevel(loggerContext, loggerName, resetLogLevel);
      // Copy-on-write removal of the expired entry from the shared config.
      latestLogConfig.getAndUpdate(input -> {
        TreeMap<String, LogLevel> result = new TreeMap<>(input);
        result.remove(loggerName);
        return result;
      });
    }
  }
  // Push all level changes to the live loggers in one pass.
  loggerContext.updateLoggers();
}

代码示例来源:origin: resilience4j/resilience4j

/**
 * Atomically moves the circuit breaker into {@code newState}, generating
 * the concrete state object via {@code newStateGenerator}. Transitioning
 * into the state the breaker is already in is a no-op; a real transition
 * additionally publishes a state-transition event.
 */
private void stateTransition(State newState, Function<CircuitBreakerState, CircuitBreakerState> newStateGenerator) {
    CircuitBreakerState previous = stateReference.getAndUpdate(current ->
        current.getState() == newState ? current : newStateGenerator.apply(current));
    if (previous.getState() != newState) {
        publishStateTransitionEvent(StateTransition.transitionBetween(previous.getState(), newState));
    }
}

代码示例来源:origin: Netflix/concurrency-limits

/**
 * Folds one sample into the running metrics: the count is bumped by one
 * and the sample value is added to the running total, with the new
 * immutable Metrics instance swapped in atomically.
 */
@Override
public void accept(Long sample) {
    foo.getAndUpdate(metrics -> new Metrics(metrics.count + 1, metrics.total + sample));
}

代码示例来源:origin: micronaut-projects/micronaut-core

fileAccess.getAndUpdate(channel -> {
  if (channel == null) {
    try {

代码示例来源:origin: apache/storm

/**
 * Activates a topology in cluster state: builds the StormBase record
 * (name, version, launch time, status, worker/executor counts, owner),
 * writes it to cluster state, seeds the executor cache, and notifies the
 * action listener.
 *
 * @param topoName      human-readable topology name
 * @param topoId        unique topology id
 * @param initStatus    initial status; must be ACTIVE or INACTIVE
 * @param owner         topology owner
 * @param principal     submitting principal
 * @param topoConf      topology configuration
 * @param stormTopology the topology structure as submitted
 */
private void startTopology(String topoName, String topoId, TopologyStatus initStatus, String owner,
              String principal, Map<String, Object> topoConf, StormTopology stormTopology)
  throws KeyNotFoundException, AuthorizationException, IOException, InvalidTopologyException {
  assert (TopologyStatus.ACTIVE == initStatus || TopologyStatus.INACTIVE == initStatus);
  IStormClusterState state = stormClusterState;
  Map<String, Integer> numExecutors = new HashMap<>();
  // System topology includes implicit system components (e.g. ackers).
  StormTopology topology = StormCommon.systemTopology(topoConf, stormTopology);
  for (Entry<String, Object> entry : StormCommon.allComponents(topology).entrySet()) {
    numExecutors.put(entry.getKey(), StormCommon.numStartExecutors(entry.getValue()));
  }
  LOG.info("Activating {}: {}", topoName, topoId);
  StormBase base = new StormBase();
  base.set_name(topoName);
  if (topoConf.containsKey(Config.TOPOLOGY_VERSION)) {
    base.set_topology_version(ObjectReader.getString(topoConf.get(Config.TOPOLOGY_VERSION)));
  }
  base.set_launch_time_secs(Time.currentTimeSecs());
  base.set_status(initStatus);
  // Worker count defaults to 0 when TOPOLOGY_WORKERS is unset.
  base.set_num_workers(ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_WORKERS), 0));
  base.set_component_executors(numExecutors);
  base.set_owner(owner);
  base.set_principal(principal);
  base.set_component_debug(new HashMap<>());
  state.activateStorm(topoId, base, topoConf);
  // Seed the executor cache atomically; Assoc is defined elsewhere.
  idToExecutors.getAndUpdate(new Assoc<>(topoId,
    new HashSet<>(computeExecutors(topoId, base, topoConf, stormTopology))));
  notifyTopologyActionListener(topoName, "activate");
}

代码示例来源:origin: resilience4j/resilience4j

/**
 * Forces the breaker back into the CLOSED state regardless of where it
 * currently is. A transition event is published only when the state
 * actually changed; a reset event is always published afterwards.
 */
@Override
public void reset() {
    CircuitBreakerState before = stateReference.getAndUpdate(ignored -> new ClosedState(this));
    if (before.getState() != CLOSED) {
        publishStateTransitionEvent(StateTransition.transitionBetween(before.getState(), CLOSED));
    }
    publishResetEvent();
}

代码示例来源:origin: apache/storm

/**
 * Tears down all state for topologies that are eligible for cleanup.
 * Only runs on the leader; the id set to clean is computed under the
 * submit lock, then each topology's heartbeats, errors, keys,
 * backpressure state, jars, dist dir, and cached executors are removed.
 *
 * @throws Exception propagated from cluster-state and blob-store calls
 */
@VisibleForTesting
public void doCleanup() throws Exception {
  if (!isLeader()) {
    LOG.info("not a leader, skipping cleanup");
    return;
  }
  IStormClusterState state = stormClusterState;
  Set<String> toClean;
  // Compute the cleanup set under the submit lock so a concurrent submit
  // cannot race with eligibility computation.
  synchronized (submitLock) {
    toClean = topoIdsToClean(state, blobStore, this.conf);
  }
  if (toClean != null) {
    for (String topoId : toClean) {
      LOG.info("Cleaning up {}", topoId);
      state.teardownHeartbeats(topoId);
      state.teardownTopologyErrors(topoId);
      state.removeAllPrivateWorkerKeys(topoId);
      state.removeBackpressure(topoId);
      rmDependencyJarsInTopology(topoId);
      forceDeleteTopoDistDir(topoId);
      rmTopologyKeys(topoId);
      heartbeatsCache.removeTopo(topoId);
      // Drop the cached executor entry atomically; Dissoc is defined elsewhere.
      idToExecutors.getAndUpdate(new Dissoc<>(topoId));
    }
  }
}

代码示例来源:origin: micronaut-projects/micronaut-core

typeVariable = typeVariable.getFirstTypeVariable().orElse(Argument.OBJECT_ARGUMENT);
dataReference.subject.getAndUpdate(subject -> {
  if (subject == null) {
    ReplaySubject childSubject = ReplaySubject.create();
dataReference.upload.getAndUpdate(upload -> {
  if (upload == null) {
    return new NettyStreamingFileUpload(

代码示例来源:origin: apache/storm

/**
 * Applies a pending rebalance to a topology: clears the pending action
 * options, applies any requested executor/worker count changes, updates
 * cluster state and the blob store, invalidates the cached executors so
 * they are recomputed, and finally recomputes assignments.
 *
 * @param topoId    topology being rebalanced
 * @param stormBase current StormBase carrying the rebalance options
 * @throws Exception propagated from cluster-state/blob-store/assignment calls
 */
void doRebalance(String topoId, StormBase stormBase) throws Exception {
  RebalanceOptions rbo = stormBase.get_topology_action_options().get_rebalance_options();
  StormBase updated = new StormBase();
  // Clearing the action options marks the rebalance as consumed.
  updated.set_topology_action_options(null);
  updated.set_component_debug(Collections.emptyMap());
  if (rbo.is_set_num_executors()) {
    updated.set_component_executors(rbo.get_num_executors());
  }
  if (rbo.is_set_num_workers()) {
    updated.set_num_workers(rbo.get_num_workers());
  }
  stormClusterState.updateStorm(topoId, updated);
  updateBlobStore(topoId, rbo, ServerUtils.principalNameToSubject(rbo.get_principal()));
  idToExecutors.getAndUpdate(new Dissoc<>(topoId)); // remove the executors cache to let it recompute.
  mkAssignments(topoId);
}

代码示例来源:origin: org.neo4j/neo4j-kernel

/**
 * Starts auto-indexing the given property key.
 * <p>
 * Copy-on-write update of the shared key set: the update function builds
 * a fresh set so concurrent readers of the {@code AtomicReference} never
 * observe a partially modified collection. {@code getAndUpdate} may
 * re-apply the function under contention, which is safe here because the
 * function is side-effect-free.
 *
 * @param propName the property key to include in auto-indexing
 */
@Override
public void startAutoIndexingProperty( String propName )
{
  propertyKeysToInclude.getAndUpdate( current ->
  {
    // Copy constructor replaces the two-step new HashSet<>() + addAll()
    // and pre-sizes the backing table for the source collection.
    Set<String> updated = new HashSet<>( current );
    updated.add( propName );
    return updated;
  });
}

代码示例来源:origin: io.leangen.graphql/graphql-spqr-spring-boot-autoconfigure

/**
 * WebSocket teardown hook: cancels all in-flight subscriptions and, when
 * a scheduler is configured, atomically detaches and cancels any pending
 * keep-alive task (without interrupting it if already running).
 */
@Override
public void afterConnectionClosed(WebSocketSession session, CloseStatus status) {
    cancelAll();
    if (taskScheduler == null) {
        return;
    }
    this.keepAlive.getAndUpdate(scheduled -> {
        if (scheduled != null) {
            scheduled.cancel(false);
        }
        return null;
    });
}

代码示例来源:origin: com.tomitribe.tribestream/tribestream-container

/**
 * Adds {@code host} to the healthy-host list via an atomic copy-on-write
 * update of the shared reference.
 */
private void addHost(final HostMetaData host) {
  this.healthyHosts.getAndUpdate(healthyHosts -> {
    final List<HostMetaData> hosts = healthyHosts.getHosts();
    final List<HostMetaData> updatedHosts = new ArrayList<>(hosts);
    // NOTE(review): List.add is specified to always return true, so the
    // fall-through below returning the old value is effectively dead code.
    // Confirm whether duplicate-free (Set) semantics were intended here.
    if (updatedHosts.add(host)) {
      return HealthyHosts.healthy(updatedHosts);
    }
    return healthyHosts;
  });
}

相关文章