org.elasticsearch.hadoop.util.Assert.hasText()方法的使用及代码示例

x33g5p2x  于2022-01-17 转载在 其他  
字(8.9k)|赞(0)|评价(0)|浏览(238)

本文整理了Java中org.elasticsearch.hadoop.util.Assert.hasText()方法的一些代码示例，展示了Assert.hasText()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台，是从一些精选项目中提取出来的代码，具有较强的参考意义，能在一定程度上帮助到你。Assert.hasText()方法的具体详情如下：
包路径:org.elasticsearch.hadoop.util.Assert
类名称:Assert
方法名:hasText

Assert.hasText介绍

暂无

代码示例

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Restricts this search request to a single shard.
 *
 * @param shardId shard identifier; must contain text
 * @return this builder, for call chaining
 */
public SearchRequestBuilder shard(String shardId) {
  Assert.hasText(shardId, "Invalid shard");
  this.shard = shardId;
  return this;
}

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Builds a reusable ECS message template.
 * Fails fast when the mandatory event category/type fields are blank;
 * the remaining inputs are stored as-is.
 */
public MessageTemplate(ElasticCommonSchema schema, Map<String, String> labels, Set<String> tags, HostData host,
            String eventCategory, String eventType) {
  // validate the required ECS event fields up front
  Assert.hasText(eventType, "Missing " + FieldNames.FIELD_EVENT_TYPE + " value for ECS template.");
  Assert.hasText(eventCategory, "Missing " + FieldNames.FIELD_EVENT_CATEGORY + " value for ECS template.");
  this.eventType = eventType;
  this.eventCategory = eventCategory;
  this.host = host;
  this.tags = tags;
  this.labels = labels;
  this.schema = schema;
}

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Asserts that the given sequence contains actual text, failing with the
 * default assertion message when it is null, empty, or blank.
 *
 * @param sequence the character sequence to check
 */
public static void hasText(CharSequence sequence) {
  String defaultMessage = "[Assertion failed] - this CharSequence argument must have text; it must not be null, empty, or blank";
  hasText(sequence, defaultMessage);
}

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Returns the ES node this task has been pinned to.
 * Fails when the internal pinned-node setting has not been populated.
 *
 * @param settings job settings to read from
 * @return the pinned node identifier
 */
public static String getPinnedNode(Settings settings) {
  final String pinned = settings.getProperty(InternalConfigurationOptions.INTERNAL_ES_PINNED_NODE);
  Assert.hasText(pinned, "Task has not been pinned to a node...");
  return pinned;
}

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Returns the transport pooling key assigned to this job.
 * Fails when the internal pooling-key setting has not been populated.
 *
 * @param settings job settings to read from
 * @return the transport pooling key
 */
public static String getJobTransportPoolingKey(Settings settings) {
  final String poolingKey = settings.getProperty(InternalConfigurationOptions.INTERNAL_TRANSPORT_POOLING_KEY);
  Assert.hasText(poolingKey, "Job has not been assigned a transport pooling key...");
  return poolingKey;
}

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Instantiates the given class by name using its no-argument constructor.
 *
 * @param className fully qualified class name; must contain text
 * @param loader    class loader to resolve with; falls back to this utility's own loader when null
 * @return a new instance of the class, cast to the caller's expected type
 * @throws EsHadoopIllegalStateException when the class cannot be loaded or instantiated
 */
@SuppressWarnings("unchecked")
public static <T> T instantiate(String className, ClassLoader loader) {
  Assert.hasText(className, "No class name given");
  ClassLoader cl = (loader != null ? loader : ObjectUtils.class.getClassLoader());
  Class<?> clz = null;
  try {
    clz = cl.loadClass(className);
  } catch (ClassNotFoundException e) {
    throw new EsHadoopIllegalStateException(String.format("Cannot load class [%s]", className), e);
  }
  try {
    // Class.newInstance() is deprecated (it sneaky-throws checked constructor
    // exceptions); go through the declared no-arg constructor instead, which
    // wraps constructor failures in InvocationTargetException — still caught below
    return (T) clz.getDeclaredConstructor().newInstance();
  } catch (Exception ex) {
    throw new EsHadoopIllegalStateException(String.format("Cannot instantiate class [%s]", className), ex);
  }
}

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Prepares the Hadoop configuration for a Hive read or write and verifies
 * that Hive declared a table location.
 *
 * @param tableDesc Hive table descriptor
 * @param read      true for input (read) initialization, false for output (write)
 */
private void init(TableDesc tableDesc, boolean read) {
  Configuration cfg = getConf();
  // NB: we can't just merge the table properties in, we need to save them per input/output otherwise clashes occur which confuse Hive
  Settings settings = HadoopSettingsManager.loadFrom(cfg);
  //settings.setProperty((read ? HiveConstants.INPUT_TBL_PROPERTIES : HiveConstants.OUTPUT_TBL_PROPERTIES), IOUtils.propsToString(tableDesc.getProperties()));
  // reads need no generic setting; writes through the old API must swap in the ES committer
  if (!read) {
    HadoopCfgUtils.setOutputCommitterClass(cfg, EsOutputFormat.EsOutputCommitter.class.getName());
  }
  String tableLocation = tableDesc.getProperties().getProperty(TABLE_LOCATION);
  Assert.hasText(tableLocation, String.format(
      "no table location [%s] declared by Hive resulting in abnormal execution;", TABLE_LOCATION));
}

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Selects an appropriate field from the given Hive table schema to insert JSON data into if the feature is enabled
 * @param settings Settings to read schema information from
 * @return A FieldAlias object that projects the json source field into the select destination field
 */
/**
 * Selects an appropriate field from the given Hive table schema to insert JSON data into if the feature is enabled.
 * Picks the first non-virtual column of type "string"; fails when none exists.
 *
 * @param settings Settings to read schema information from
 * @param alias    alias mapping used to translate the Hive column name to its ES field name
 * @return the ES field name to project the JSON source into
 */
static String discoverJsonFieldName(Settings settings, FieldAlias alias) {
  Set<String> virtualColumnsToBeRemoved = new HashSet<String>(HiveConstants.VIRTUAL_COLUMNS.length);
  Collections.addAll(virtualColumnsToBeRemoved, HiveConstants.VIRTUAL_COLUMNS);
  List<String> columnNames = StringUtils.tokenize(settings.getProperty(HiveConstants.COLUMNS), ",");
  Iterator<String> nameIter = columnNames.iterator();
  List<String> columnTypes = StringUtils.tokenize(settings.getProperty(HiveConstants.COLUMNS_TYPES), ":");
  Iterator<String> typeIter = columnTypes.iterator();
  String candidateField = null;
  // guard both iterators: a name/type count mismatch previously threw
  // NoSuchElementException from typeIter.next()
  while (nameIter.hasNext() && typeIter.hasNext() && candidateField == null) {
    String columnName = nameIter.next();
    String type = typeIter.next();
    if ("string".equalsIgnoreCase(type) && !virtualColumnsToBeRemoved.contains(columnName)) {
      candidateField = columnName;
    }
  }
  Assert.hasText(candidateField, "Could not identify a field to insert JSON data into " +
      "from the given fields : {" + columnNames + "} of types {" + columnTypes + "}");
  // If the candidate field is aliased to something else, find the alias name and use that for the field name:
  candidateField = alias.toES(candidateField);
  return candidateField;
}

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Field name to query from this level of an object
 */
/**
 * Descends one level into the object, querying the named field.
 *
 * @param fieldName field name at this level of the object; must not be blank
 * @return a new child {@code Query} scoped to the given field
 */
public Query get(String fieldName) {
  Assert.hasText(fieldName, "Cannot query empty field name");
  return new Query(fieldName, this);
}

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Validates the write configuration before output begins: checks that a
 * resource is declared, discovers the ES version, verifies id/operation
 * settings and index existence, and warns when speculative execution is on.
 *
 * @param cfg Hadoop job configuration
 * @throws IOException propagated from initialization utilities
 */
private void init(Configuration cfg) throws IOException {
    Settings settings = HadoopSettingsManager.loadFrom(cfg);
    Assert.hasText(settings.getResourceWrite(), String.format("No resource ['%s'] (index/query/location) specified", ES_RESOURCE));

    // Need to discover the ESVersion before checking if index exists.
    InitializationUtils.discoverEsVersion(settings, log);
    InitializationUtils.checkIdForOperation(settings);
    InitializationUtils.checkIndexExistence(settings);

    // speculative tasks can emit the same document twice — warn the user
    boolean hasReducers = (HadoopCfgUtils.getReduceTasks(cfg) != null);
    boolean speculative = hasReducers
            ? HadoopCfgUtils.getSpeculativeReduce(cfg)
            : HadoopCfgUtils.getSpeculativeMap(cfg);
    if (speculative) {
      String phase = hasReducers ? "reducer" : "mapper";
      log.warn("Speculative execution enabled for " + phase + " - consider disabling it to prevent data corruption");
    }
  }
}

代码示例来源:origin: elastic/elasticsearch-hadoop

// NOTE(review): incomplete excerpt — the surrounding method (resource-name
// parsing into [index]/[type]) was truncated by the article, so the braces
// here are unbalanced; shown only to illustrate Assert.hasText() usage.
Assert.hasText(resource, errorMessage + resource);
  // typed resource: everything after the '/' must be a non-empty type name
  type = res.substring(slash + 1);
  typed = true;
  Assert.hasText(type, "No type found; expecting [index]/[type]");
} else {
  // no '/' present: the whole resource string is the index name
  index = res;
  typed = false;
Assert.hasText(index, "No index found; expecting [index]/[type]");
Assert.isTrue(!StringUtils.hasWhitespace(index) && !StringUtils.hasWhitespace(type), "Index/type should not contain whitespaces");

代码示例来源:origin: elastic/elasticsearch-hadoop

// NOTE(review): single-line excerpt — verifies a write resource is configured
// before the ES error handler is wired up.
Assert.hasText(clientSettings.getResourceWrite(), "Could not locate write resource for ES error handler.");

代码示例来源:origin: elastic/elasticsearch-hadoop

/**
 * Validates the required ECS template fields before rendering the message.
 * NOTE(review): incomplete excerpt — the method body (and its closing brace)
 * continues past this snippet; only the validation prologue is shown.
 */
public BytesArray generateMessage(String ts, String message, String exceptionType, String exceptionMessage, String rawEvent) {
  Assert.hasText(ts, "Missing " + FieldNames.FIELD_TIMESTAMP + " value for ECS template.");
  Assert.hasText(message, "Missing " + FieldNames.FIELD_MESSAGE + " value for ECS template.");
  Assert.hasText(exceptionType, "Missing " + FieldNames.FIELD_ERROR_CODE + " value for ECS template.");
  Assert.hasText(exceptionMessage, "Missing " + FieldNames.FIELD_ERROR_MESSAGE + " value for ECS template.");
  Assert.hasText(rawEvent, "Missing " + FieldNames.FIELD_EVENT_RAW + " value for ECS template.");

代码示例来源:origin: org.elasticsearch/elasticsearch-spark

/**
 * Restricts the query to the given node.
 *
 * @param nodeId node identifier; must contain text
 * @return this builder, for call chaining
 */
public QueryBuilder node(String nodeId) {
  Assert.hasText(nodeId, "Invalid node");
  this.node = nodeId;
  return this;
}

代码示例来源:origin: org.elasticsearch/elasticsearch-hadoop

/**
 * Targets the search request at a specific shard.
 *
 * @param targetShard shard identifier; must contain text
 * @return this builder, for call chaining
 */
public SearchRequestBuilder shard(String targetShard) {
  Assert.hasText(targetShard, "Invalid shard");
  this.shard = targetShard;
  return this;
}

代码示例来源:origin: org.elasticsearch/elasticsearch-spark-13

/**
 * Constructs an ECS message template, rejecting blank event category/type
 * values before any state is assigned.
 */
public MessageTemplate(ElasticCommonSchema schema, Map<String, String> labels, Set<String> tags, HostData host,
            String eventCategory, String eventType) {
  // mandatory ECS event fields — fail fast when either is blank
  Assert.hasText(eventCategory, "Missing " + FieldNames.FIELD_EVENT_CATEGORY + " value for ECS template.");
  Assert.hasText(eventType, "Missing " + FieldNames.FIELD_EVENT_TYPE + " value for ECS template.");
  this.eventType = eventType;
  this.eventCategory = eventCategory;
  this.schema = schema;
  this.host = host;
  this.labels = labels;
  this.tags = tags;
}

代码示例来源:origin: org.elasticsearch/elasticsearch-spark-13

/**
 * Pins the search request to the named shard.
 *
 * @param shardName shard identifier; must contain text
 * @return this builder, for call chaining
 */
public SearchRequestBuilder shard(String shardName) {
  Assert.hasText(shardName, "Invalid shard");
  this.shard = shardName;
  return this;
}

代码示例来源:origin: org.elasticsearch/elasticsearch-hadoop

/**
 * Looks up the node this task was pinned to, failing when none was assigned.
 *
 * @param settings job settings to read from
 * @return the pinned node identifier
 */
public static String getPinnedNode(Settings settings) {
  final String pinnedNode = settings.getProperty(InternalConfigurationOptions.INTERNAL_ES_PINNED_NODE);
  Assert.hasText(pinnedNode, "Task has not been pinned to a node...");
  return pinnedNode;
}

代码示例来源:origin: org.elasticsearch/elasticsearch-hadoop-mr

/**
 * Looks up the transport pooling key assigned to this job, failing when
 * none was set.
 *
 * @param settings job settings to read from
 * @return the transport pooling key
 */
public static String getJobTransportPoolingKey(Settings settings) {
  final String key = settings.getProperty(InternalConfigurationOptions.INTERNAL_TRANSPORT_POOLING_KEY);
  Assert.hasText(key, "Job has not been assigned a transport pooling key...");
  return key;
}

代码示例来源:origin: org.elasticsearch/elasticsearch-spark-13

/**
 * Fetches the pinned-node setting for this task; asserts it is present.
 *
 * @param settings job settings to read from
 * @return the pinned node identifier
 */
public static String getPinnedNode(Settings settings) {
  final String nodeId = settings.getProperty(InternalConfigurationOptions.INTERNAL_ES_PINNED_NODE);
  Assert.hasText(nodeId, "Task has not been pinned to a node...");
  return nodeId;
}

相关文章