本文整理了Java中java.util.LinkedHashSet.addAll()
方法的一些代码示例,展示了LinkedHashSet.addAll()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。LinkedHashSet.addAll()
方法的具体详情如下:
包路径:java.util.LinkedHashSet
类名称:LinkedHashSet
方法名:addAll
暂无
代码示例来源:origin: hibernate/hibernate-orm
/**
 * Registers a set of type information on this builder.
 * <p>
 * The first call adopts the given set as-is (no copy); later calls merge
 * their elements into the already-held set.
 *
 * @param typeInfoSet the type info entries to register
 * @return this builder, for call chaining
 */
public Builder setTypeInfoSet(LinkedHashSet<TypeInfo> typeInfoSet) {
    if ( this.typeInfoSet != null ) {
        this.typeInfoSet.addAll( typeInfoSet );
    }
    else {
        this.typeInfoSet = typeInfoSet;
    }
    return this;
}
代码示例来源:origin: aws/aws-sdk-java
/**
 * Fluent variant of {@link #addAll(Collection)}: adds every element of the
 * given collection to this set and returns the set itself for chaining.
 *
 * @param c elements to add; must not be {@code null}
 * @return this set
 */
public FluentHashSet<E> withAll(Collection<? extends E> c) {
    // Equivalent to super.addAll(c): AbstractCollection.addAll simply
    // iterates and calls add(e) for each element.
    for (E element : c) {
        add(element);
    }
    return this;
}
代码示例来源:origin: apache/incubator-druid
private void resolveWaitingFutures()
{
    // Snapshot and clear the shared set under the lock, then resolve the
    // futures outside the synchronized block to avoid holding the lock
    // while running arbitrary resolution callbacks.
    final LinkedHashSet<CustomSettableFuture> toResolve;
    synchronized (waitingFutures) {
        toResolve = new LinkedHashSet<>(waitingFutures);
        waitingFutures.clear();
    }
    for (CustomSettableFuture pending : toResolve) {
        pending.resolve();
    }
}
代码示例来源:origin: spotbugs/spotbugs
/**
 * Concatenates two lists, dropping duplicates while keeping the first
 * occurrence order (elements of {@code lst1} first, then any elements of
 * {@code lst2} not already seen).
 *
 * @param lst1 first list
 * @param lst2 second list
 * @return a new mutable list with the de-duplicated concatenation
 */
public static <T> List<T> appendWithoutDuplicates(List<T> lst1, List<T> lst2) {
    // LinkedHashSet preserves insertion order and ignores re-added elements.
    LinkedHashSet<T> seen = new LinkedHashSet<>();
    seen.addAll(lst1);
    seen.addAll(lst2);
    return new ArrayList<>(seen);
}
代码示例来源:origin: linkedin/cruise-control
/**
 * Builds the ordered set of unique candidate broker ids: brokers that still
 * require replicas come first, followed by the secondary eligible brokers.
 *
 * @return insertion-ordered set of candidate broker ids
 */
private LinkedHashSet<Integer> sortedCandidateBrokerIds() {
    LinkedHashSet<Integer> candidates = new LinkedHashSet<>();
    candidates.addAll(_requiredNumReplicasByBrokerId.keySet());
    candidates.addAll(_secondaryEligibleBrokerIds);
    return candidates;
}
代码示例来源:origin: apache/flink
/**
 * Returns the registered Kryo types.
 * <p>
 * If Kryo is forced, the types that were only registered as POJO types are
 * included as well, after the explicitly registered Kryo types, preserving
 * registration order.
 *
 * @return the set of types registered for Kryo serialization
 */
public LinkedHashSet<Class<?>> getRegisteredKryoTypes() {
    if (isForceKryoEnabled()) {
        // if we force kryo, we must also return all the types that
        // were previously only registered as POJO
        LinkedHashSet<Class<?>> result = new LinkedHashSet<>(registeredKryoTypes);
        // Set.add() is a no-op for elements already present and keeps the
        // first-insertion order, so the former explicit contains() check
        // per element was redundant.
        result.addAll(registeredPojoTypes);
        return result;
    } else {
        return registeredKryoTypes;
    }
}
代码示例来源:origin: nutzam/nutz
/**
 * Collects the loader's name(s) plus (when present) the context's names,
 * de-duplicated and in insertion order.
 *
 * @return the combined names as an array
 */
public String[] getNames() {
    LinkedHashSet<String> names = new LinkedHashSet<String>();
    names.addAll(Arrays.asList(loader.getName()));
    if (context != null) {
        names.addAll(context.names());
    }
    return names.toArray(new String[names.size()]);
}
代码示例来源:origin: jersey/jersey
/**
 * Merges all throwables registered in the given {@link MultiException}
 * into this collector. A {@code null} argument is ignored.
 *
 * @param me the {@code MultiException} to merge, may be {@code null}
 */
public void addMultiException(MultiException me) {
    if (me != null) {
        if (throwables == null) {
            // lazily created on first use
            throwables = new LinkedHashSet<>();
        }
        throwables.addAll(me.getErrors());
    }
}
代码示例来源:origin: jersey/jersey
/**
 * Adds a throwable to this collector. A {@link MultiException} is flattened:
 * its individual errors are added instead of the wrapper itself.
 * A {@code null} argument is ignored.
 *
 * @param th the throwable to record, may be {@code null}
 */
public void addThrowable(Throwable th) {
    if (th == null) {
        return;
    }
    if (throwables == null) {
        // lazily created on first use
        throwables = new LinkedHashSet<>();
    }
    if (th instanceof MultiException) {
        throwables.addAll(((MultiException) th).getErrors());
        return;
    }
    throwables.add(th);
}
代码示例来源:origin: go-lang-plugin-org/go-lang-idea-plugin
/**
 * Returns the source paths to search, vendoring-aware: when the context file
 * has vendor directories, those are placed before the SDK/GOPATH entries.
 *
 * @param project     current project
 * @param module      module to resolve paths for, may be {@code null}
 * @param contextFile file used to discover vendor directories, may be {@code null}
 * @return ordered set of source roots to look up
 */
@NotNull
public static LinkedHashSet<VirtualFile> getVendoringAwareSourcesPathsToLookup(@NotNull Project project,
                                                                               @Nullable Module module,
                                                                               @Nullable VirtualFile contextFile) {
    LinkedHashSet<VirtualFile> sdkAndGoPath = getSourcesPathsToLookup(project, module);
    if (contextFile == null) {
        return sdkAndGoPath;
    }
    Collection<VirtualFile> vendorDirectories = collectVendorDirectories(contextFile, sdkAndGoPath);
    if (vendorDirectories.isEmpty()) {
        return sdkAndGoPath;
    }
    // vendor directories shadow the regular roots, so they go first
    LinkedHashSet<VirtualFile> withVendor = newLinkedHashSet(vendorDirectories);
    withVendor.addAll(sdkAndGoPath);
    return withVendor;
}
代码示例来源:origin: SonarSource/sonarqube
/**
 * Reads a mandatory property, splits it into individual JVM options,
 * validates them, and appends them to the option list. An empty value
 * adds nothing.
 *
 * @param props        property source (the property must be present)
 * @param propertyName name of the property holding the JVM options
 * @return this builder, for chaining
 */
public T addFromMandatoryProperty(Props props, String propertyName) {
    String value = props.nonNullValue(propertyName);
    if (value.isEmpty()) {
        return castThis();
    }
    // split on the space that precedes each "-Xxx" option
    List<String> jvmOptions = Arrays.stream(value.split(" (?=-)"))
        .map(String::trim)
        .collect(Collectors.toList());
    checkOptionFormat(propertyName, jvmOptions);
    checkMandatoryOptionOverwrite(propertyName, jvmOptions);
    options.addAll(jvmOptions);
    return castThis();
}
代码示例来源:origin: alibaba/jvm-sandbox
/**
 * Computes the full superclass lineage of this class structure: the direct
 * superclass first, followed by all of its ancestors, in order.
 */
@Override
protected LinkedHashSet<ClassStructure> initialValue() {
    final LinkedHashSet<ClassStructure> lineage = new LinkedHashSet<ClassStructure>();
    final ClassStructure parent = getSuperClassStructure();
    if (parent != null) {
        // 1. the direct superclass itself
        lineage.add(parent);
        // 2. then the superclass's own ancestors
        lineage.addAll(parent.getFamilySuperClassStructures());
    }
    return lineage;
}
};
代码示例来源:origin: redisson/redisson
final LineageInfo sl = getLineageInfo(sc);
if (sl != null) {
ancestors.addAll(sl.lineage);
specificity += sl.specificity;
if (il != null) {
ancestors.removeAll(il.lineage);
ancestors.addAll(il.lineage);
specificity += il.specificity;
代码示例来源:origin: apache/kylin
/**
 * Ensures every primary/foreign key column along the join chain is part of
 * {@code allColumns} (so Calcite can see them later), then refreshes
 * {@code allColumnDescs} from the amended column set.
 */
private void amendAllColumns() {
    Set<TableRef> tables = collectTablesOnJoinChain(allColumns);
    for (TableRef table : tables) {
        JoinDesc join = model.getJoinByPKSide(table);
        if (join == null) {
            continue;
        }
        allColumns.addAll(Arrays.asList(join.getForeignKeyColumns()));
        allColumns.addAll(Arrays.asList(join.getPrimaryKeyColumns()));
    }
    for (TblColRef col : allColumns) {
        allColumnDescs.add(col.getColumnDesc());
    }
}
代码示例来源:origin: apache/ignite
/**
 * @param node Node.
 * @return {@link LinkedHashSet} with the node's internal addresses first,
 * followed by its external addresses (if the external-address attribute
 * is set).
 */
LinkedHashSet<InetSocketAddress> getNodeAddresses(TcpDiscoveryNode node) {
    LinkedHashSet<InetSocketAddress> addresses = new LinkedHashSet<>(node.socketAddresses());
    Collection<InetSocketAddress> external = node.attribute(createSpiAttributeName(ATTR_EXT_ADDRS));
    if (external != null) {
        addresses.addAll(external);
    }
    return addresses;
}
代码示例来源:origin: joelittlejohn/jsonschema2pojo
/**
 * Walks the super-schema chain recursively and collects the constructor
 * properties of every super schema, nearest first.
 *
 * @param node         current schema node
 * @param schema       current schema
 * @param onlyRequired whether to collect only required properties
 * @return ordered set of super-type constructor property names (empty when
 *         there is no super schema)
 */
private LinkedHashSet<String> getSuperTypeConstructorPropertiesRecursive(JsonNode node, Schema schema, boolean onlyRequired) {
    Schema superTypeSchema = getSuperSchema(node, schema, true);
    if (superTypeSchema == null) {
        return new LinkedHashSet<>();
    }
    JsonNode superNode = superTypeSchema.getContent();
    LinkedHashSet<String> properties = getConstructorProperties(superNode, onlyRequired);
    // recurse further up the hierarchy and append those properties
    properties.addAll(getSuperTypeConstructorPropertiesRecursive(superNode, superTypeSchema, onlyRequired));
    return properties;
}
代码示例来源:origin: alibaba/jvm-sandbox
/**
 * Runs every sub-matcher against the given class structure and merges all
 * matched behavior structures into a single result. With no sub-matchers
 * configured, an empty result is returned.
 */
@Override
public MatchingResult matching(final ClassStructure classStructure) {
    final MatchingResult merged = new MatchingResult();
    if (null != matcherArray) {
        for (final Matcher matcher : matcherArray) {
            merged.getBehaviorStructures().addAll(matcher.matching(classStructure).getBehaviorStructures());
        }
    }
    return merged;
}
代码示例来源:origin: apache/incubator-druid
/**
 * Merges all serialized rows sharing the same grouping key.
 *
 * A key with exactly one value is written through unchanged, skipping the
 * deserialization cost. Otherwise every value is deserialized and added to
 * an in-memory IncrementalIndex; whenever the index can accept no more rows
 * it is flushed to the context and a fresh index is started, seeded with the
 * dimension order accumulated so far.
 *
 * NOTE(review): looks like this runs as a combiner (single values pass
 * through unreduced) — confirm against the enclosing job setup.
 */
@Override
protected void reduce(final BytesWritable key, Iterable<BytesWritable> values, final Context context)
throws IOException, InterruptedException
{
Iterator<BytesWritable> iter = values.iterator();
// first value is read eagerly so the single-value case can short-circuit
BytesWritable first = iter.next();
if (iter.hasNext()) {
// dimension order carried across index generations so flushed segments agree
LinkedHashSet<String> dimOrder = new LinkedHashSet<>();
SortableBytes keyBytes = SortableBytes.fromBytesWritable(key);
Bucket bucket = Bucket.fromGroupKey(keyBytes.getGroupKey()).lhs;
IncrementalIndex index = makeIncrementalIndex(bucket, combiningAggs, config, null, null);
index.add(InputRowSerde.fromBytes(typeHelperMap, first.getBytes(), aggregators));
while (iter.hasNext()) {
// report liveness to the framework during long merges
context.progress();
InputRow value = InputRowSerde.fromBytes(typeHelperMap, iter.next().getBytes(), aggregators);
if (!index.canAppendRow()) {
// index is full: remember its dimension order, flush it, start a new one
dimOrder.addAll(index.getDimensionOrder());
log.info("current index full due to [%s]. creating new index.", index.getOutOfRowsReason());
flushIndexToContextAndClose(key, index, context);
index = makeIncrementalIndex(bucket, combiningAggs, config, dimOrder, index.getColumnCapabilities());
}
index.add(value);
}
// flush whatever remains in the last index
flushIndexToContextAndClose(key, index, context);
} else {
// only one value for this key: emit it as-is
context.write(key, first);
}
}
代码示例来源:origin: go-lang-plugin-org/go-lang-idea-plugin
/**
 * Collects identifier names visible around the given context element:
 * named elements beneath it, the containing file's import names, and the
 * parameter names of the enclosing function (when any). Returns an empty
 * set for a {@code null} context.
 */
@NotNull
private static LinkedHashSet<String> getNamesInContext(PsiElement context) {
    LinkedHashSet<String> result = ContainerUtil.newLinkedHashSet();
    if (context == null) {
        return result;
    }
    for (GoNamedElement namedElement : PsiTreeUtil.findChildrenOfType(context, GoNamedElement.class)) {
        result.add(namedElement.getName());
    }
    result.addAll(((GoFile)context.getContainingFile()).getImportMap().keySet());
    GoFunctionDeclaration enclosingFunction = PsiTreeUtil.getParentOfType(context, GoFunctionDeclaration.class);
    GoSignature signature = PsiTreeUtil.getChildOfType(enclosingFunction, GoSignature.class);
    for (GoParamDefinition param : PsiTreeUtil.findChildrenOfType(signature, GoParamDefinition.class)) {
        result.add(param.getName());
    }
    return result;
}
}
代码示例来源:origin: hibernate/hibernate-orm
/**
 * Populates this builder from the JDBC {@link DatabaseMetaData}: connection
 * catalog, driver capability flags, extra SQL keywords, SQLState type,
 * LOB locator behavior, and the driver-reported type information.
 *
 * @param databaseMetaData metadata of the live connection
 * @return this builder, for chaining
 * @throws SQLException propagated from the underlying metadata calls
 */
public Builder apply(DatabaseMetaData databaseMetaData) throws SQLException {
connectionCatalogName = databaseMetaData.getConnection().getCatalog();
// NOTE : databaseMetaData.getConnection().getSchema() would require java 1.7 as baseline
supportsRefCursors = StandardRefCursorSupport.supportsRefCursors( databaseMetaData );
supportsNamedParameters = databaseMetaData.supportsNamedParameters();
supportsScrollableResults = databaseMetaData.supportsResultSetType( ResultSet.TYPE_SCROLL_INSENSITIVE );
supportsGetGeneratedKeys = databaseMetaData.supportsGetGeneratedKeys();
supportsBatchUpdates = databaseMetaData.supportsBatchUpdates();
// DDL-in-transaction support is reported inverted by JDBC, hence the negation
supportsDataDefinitionInTransaction = !databaseMetaData.dataDefinitionIgnoredInTransactions();
doesDataDefinitionCauseTransactionCommit = databaseMetaData.dataDefinitionCausesTransactionCommit();
// driver-specific keywords beyond the SQL standard set
extraKeywords = parseKeywords( databaseMetaData.getSQLKeywords() );
sqlStateType = SQLStateType.interpretReportedSQLStateType( databaseMetaData.getSQLStateType() );
lobLocatorUpdateCopy = databaseMetaData.locatorsUpdateCopy();
typeInfoSet = new LinkedHashSet<TypeInfo>();
typeInfoSet.addAll( TypeInfo.extractTypeInfo( databaseMetaData ) );
return this;
}
内容来源于网络,如有侵权,请联系作者删除!