This article collects Java code examples for the org.elasticsearch.hadoop.util.Assert.isTrue() method, showing how Assert.isTrue() is used in practice. The examples are drawn from curated open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, and should make a solid reference. Details of the Assert.isTrue() method:
Package: org.elasticsearch.hadoop.util
Class: Assert
Method: isTrue
Description: asserts that a boolean expression is true; if it is not, an exception carrying the supplied message is thrown (an EsHadoopIllegalArgumentException, per the FastByteArrayOutputStream javadoc quoted below).
Code example source: elastic/elasticsearch-hadoop

public static void isTrue(Boolean object) {
    isTrue(object, "[Assertion failed] - this argument must be true");
}
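The two overloads pair naturally; here is a minimal, hypothetical caller (the class and variable names are illustrative, not from the project):

import org.elasticsearch.hadoop.util.Assert;

public class AssertDemo {
    public static void main(String[] args) {
        int batchSize = 100; // pretend this came from user configuration
        // Two-argument form: custom failure message (the variant used in most examples below).
        Assert.isTrue(batchSize > 0, "batch size must be positive: " + batchSize);
        // One-argument form: falls back to the generic "[Assertion failed]" message.
        Assert.isTrue(batchSize > 0);
        System.out.println("both assertions passed");
    }
}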
Code example source: elastic/elasticsearch-hadoop

/**
 * Creates a new byte array output stream, with a buffer capacity of
 * the specified size, in bytes.
 *
 * @param size the initial size.
 * @throws EsHadoopIllegalArgumentException if size is negative.
 */
public FastByteArrayOutputStream(int size) {
    Assert.isTrue(size >= 0, "Negative initial size: " + size);
    data = new BytesArray(size);
}
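A quick sketch of the guard in action, assuming FastByteArrayOutputStream sits in org.elasticsearch.hadoop.util alongside Assert:

import org.elasticsearch.hadoop.util.FastByteArrayOutputStream;

public class BufferDemo {
    public static void main(String[] args) {
        new FastByteArrayOutputStream(1024); // fine: non-negative initial capacity
        try {
            new FastByteArrayOutputStream(-1); // trips Assert.isTrue(size >= 0, ...)
        } catch (RuntimeException e) { // EsHadoopIllegalArgumentException, per the javadoc above
            System.out.println(e.getMessage()); // prints "Negative initial size: -1"
        }
    }
}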
Code example source: elastic/elasticsearch-hadoop

public SearchRequestBuilder scroll(long keepAliveMillis) {
    Assert.isTrue(keepAliveMillis > 0, "Invalid scroll");
    this.scroll = TimeValue.timeValueMillis(keepAliveMillis);
    return this;
}
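Note that the guard rejects zero as well as negative values: a scroll keep-alive of 0 ms would let the server-side scroll context expire immediately, so only strictly positive values make sense here.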
Code example source: elastic/elasticsearch-hadoop

public BulkResponse tryFlush() {
    Assert.isTrue(writeInitialized, "Cannot flush non-initialized write operation");
    return bulkProcessor.tryFlush();
}
Code example source: elastic/elasticsearch-hadoop

public void flush() {
    Assert.isTrue(writeInitialized, "Cannot flush non-initialized write operation");
    bulkProcessor.flush();
}
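Both flush variants share the same precondition: the write operation must have been initialized before anything can be flushed. The only visible difference is that tryFlush() surfaces the BulkResponse to the caller while flush() discards it.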
Code example source: elastic/elasticsearch-hadoop

public static Field findField(Class<?> clazz, String name, Class<?> type) {
    Assert.notNull(clazz, "Class must not be null");
    Assert.isTrue(name != null || type != null, "Either name or type of the field must be specified");
    Class<?> searchType = clazz;
    while (!Object.class.equals(searchType) && searchType != null) {
        Field[] fields = searchType.getDeclaredFields();
        for (Field field : fields) {
            if ((name == null || name.equals(field.getName())) && (type == null || type.equals(field.getType()))) {
                return field;
            }
        }
        searchType = searchType.getSuperclass();
    }
    return null;
}
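Here Assert.isTrue() validates that at least one search criterion was supplied. The lookup itself walks up the class hierarchy; the following self-contained sketch inlines the method body (the hosting utility class is not named in the excerpt) and shows that a field declared on a superclass is still found:

import java.lang.reflect.Field;

public class FindFieldDemo {
    static class Base { private String id; }
    static class Child extends Base { private int count; }

    // Local copy of the findField method shown above, minus the Assert calls.
    static Field findField(Class<?> clazz, String name, Class<?> type) {
        Class<?> searchType = clazz;
        while (!Object.class.equals(searchType) && searchType != null) {
            for (Field field : searchType.getDeclaredFields()) {
                if ((name == null || name.equals(field.getName()))
                        && (type == null || type.equals(field.getType()))) {
                    return field;
                }
            }
            searchType = searchType.getSuperclass();
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println(findField(Child.class, "id", null));      // found on Base by name
        System.out.println(findField(Child.class, null, int.class)); // found by type alone
    }
}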
Code example source: elastic/elasticsearch-hadoop

static QueryBuilder parseURI(String raw) {
    // break down the uri into parameters
    QueryStringQueryBuilder builder = new QueryStringQueryBuilder();
    for (String token : raw.split("&")) {
        int indexOf = token.indexOf("=");
        Assert.isTrue(indexOf > 0, String.format("Cannot parse token [%s] in uri query [%s]", token, raw));
        String name = token.substring(0, indexOf);
        String value = token.substring(indexOf + 1);
        applyURIParameter(builder, name, value);
    }
    return builder;
}
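The assertion fires for any token that is not a name=value pair (indexOf > 0 also rejects tokens that start with '='). A standalone sketch of the same split-and-validate loop, with a made-up query string:

public class UriQueryDemo {
    public static void main(String[] args) {
        String raw = "q=user:costin&fields=name"; // hypothetical uri query
        for (String token : raw.split("&")) {
            int indexOf = token.indexOf('=');
            if (indexOf <= 0) { // the condition Assert.isTrue checks above
                throw new IllegalArgumentException(
                        String.format("Cannot parse token [%s] in uri query [%s]", token, raw));
            }
            System.out.println(token.substring(0, indexOf) + " -> " + token.substring(indexOf + 1));
        }
    }
}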
Code example source: elastic/elasticsearch-hadoop

@Override
public void compile(String pattern) {
    this.pattern = pattern;
    // break it down into index/type
    String[] split = pattern.split("/");
    Assert.isTrue(!ObjectUtils.isEmpty(split), "invalid pattern given " + pattern);
    // check pattern
    hasPattern = pattern.contains("{") && pattern.contains("}");
    index = parse(split[0].trim());
    if (split.length > 1) {
        // the pattern may have at most two components (index/type) and at least one
        Assert.isTrue(split.length == 2, "invalid pattern given " + pattern);
        type = parse(split[1].trim());
    } else {
        type = null;
    }
}
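For example, a resource such as logs-{date}/event splits into the index pattern logs-{date} and the type event, while a resource with more than one slash (say a/b/c) produces three components and trips the split.length == 2 assertion.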
Code example source: elastic/elasticsearch-hadoop

@Override
protected boolean id(List<Object> list, boolean commaMightBeNeeded) {
    boolean added = super.id(list, commaMightBeNeeded);
    Assert.isTrue(added, String.format("Operation [%s] requires an id but none was given/found", getOperation()));
    return added;
}
}
Code example source: elastic/elasticsearch-hadoop

@Override
public void setSettings(Settings settings) {
    this.settings = settings;
    String paramString = settings.getUpdateScriptParams();
    List<String> fields = StringUtils.tokenize(paramString);
    for (String string : fields) {
        List<String> param = StringUtils.tokenize(string, ":");
        Assert.isTrue(param.size() == 2, "Invalid param definition " + string);
        params.put(param.get(0), createFieldExtractor(param.get(1)));
    }
}
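Each script parameter must come in name:field form. Assuming the default tokenizer splits on commas, a setting such as count:number,name:user.name yields two well-formed pairs, while a bare token like count (no colon) fails the param.size() == 2 assertion.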
Code example source: elastic/elasticsearch-hadoop

protected List<Object> parse(String string) {
    // break it down into fields
    List<Object> template = new ArrayList<Object>();
    while (string.contains("{")) {
        int startPattern = string.indexOf("{");
        template.add(string.substring(0, startPattern));
        int endPattern = string.indexOf("}");
        Assert.isTrue(endPattern > startPattern + 1, "Invalid pattern given " + string);
        String nestedString = string.substring(startPattern + 1, endPattern);
        int separator = nestedString.indexOf(FORMAT_SEPARATOR);
        if (separator > 0) {
            Assert.isTrue(nestedString.length() > separator + 1, "Invalid format given " + nestedString);
            String format = nestedString.substring(separator + 1);
            nestedString = nestedString.substring(0, separator);
            template.add(wrapWithFormatter(format, createFieldExtractor(nestedString)));
        }
        else {
            template.add(createFieldExtractor(nestedString));
        }
        string = string.substring(endPattern + 1).trim();
    }
    if (StringUtils.hasText(string)) {
        template.add(string);
    }
    return template;
}
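The two assertions reject empty braces ({}) and a format separator with nothing after it. A simplified, self-contained sketch of the brace-scanning loop (the formatter branch is dropped and field extractors are stubbed as plain strings):

import java.util.ArrayList;
import java.util.List;

public class PatternParseDemo {
    public static void main(String[] args) {
        String string = "logs-{date}-{type}";
        List<Object> template = new ArrayList<Object>();
        while (string.contains("{")) {
            int startPattern = string.indexOf('{');
            template.add(string.substring(0, startPattern)); // leading literal text
            int endPattern = string.indexOf('}');
            if (endPattern <= startPattern + 1) { // the first assertion above
                throw new IllegalArgumentException("Invalid pattern given " + string);
            }
            // stand-in for createFieldExtractor(nestedString)
            template.add("<extractor:" + string.substring(startPattern + 1, endPattern) + ">");
            string = string.substring(endPattern + 1).trim();
        }
        if (!string.isEmpty()) { // stand-in for StringUtils.hasText
            template.add(string);
        }
        System.out.println(template); // [logs-, <extractor:date>, -, <extractor:type>]
    }
}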
Code example source: elastic/elasticsearch-hadoop

public static void checkIdForOperation(Settings settings) {
    String operation = settings.getOperation();
    if (ConfigurationOptions.ES_OPERATION_UPDATE.equals(operation)) {
        Assert.isTrue(StringUtils.hasText(settings.getMappingId()),
                String.format("Operation [%s] requires an id but none (%s) was specified", operation, ConfigurationOptions.ES_MAPPING_ID));
    }
}
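In other words, configuring the update operation without also configuring the id mapping (the setting named by ConfigurationOptions.ES_MAPPING_ID) fails fast at setup time instead of failing per-document during the bulk write.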
Code example source: elastic/elasticsearch-hadoop

@Override
public void convert(Object from, BytesArray to) {
    // guard the message argument: from may legitimately be null here, and
    // String.format is evaluated eagerly, so calling from.getClass() unguarded would NPE
    Assert.isTrue(from == null || from instanceof Tuple,
            String.format("Unexpected object type, expecting [%s], given [%s]", Tuple.class, (from != null ? from.getClass() : null)));
    // handle common cases
    Tuple tuple = (Tuple) from;
    if (tuple == null || tuple.size() == 0) {
        to.bytes("{}");
        return;
    }
    Assert.isTrue(tuple.size() == 1, "When using JSON input, only one field is expected");
    super.convert(tuple.getValue(0), to);
}
}
Code example source: elastic/elasticsearch-hadoop

@Override
public void convert(Object from, BytesArray to) {
    Assert.isTrue(from instanceof HiveType,
            String.format("Unexpected object type, expecting [%s], given [%s]", HiveType.class, from.getClass()));
    ObjectInspector oi = ht.getObjectInspector();
    Assert.isTrue(Category.STRUCT == oi.getCategory(),
            String.format("Unexpected object category, expecting [%s], given [%s]", Category.STRUCT, oi.getTypeName()));
    Assert.isTrue(refs.size() == 1, "When using JSON input, only one field is expected");
    Assert.isTrue(Category.PRIMITIVE == foi.getCategory(),
            String.format("Unexpected object category, expecting [%s], given [%s]", Category.PRIMITIVE, oi.getTypeName()));
    // ... (remainder of the method is elided in the original excerpt; ht, refs and foi
    // are defined outside the quoted lines)
}
Code example source: elastic/elasticsearch-hadoop

private Writable wrapJsonData(Writable blob) {
    Assert.isTrue(blob instanceof Text, "Property `es.output.json` is enabled, but returned data was not of type Text...");
    switch (structTypeInfo.getCategory()) {
        case STRUCT:
            Map<Writable, Writable> mapContainer = new MapWritable();
            mapContainer.put(jsonFieldName, blob);
            return (Writable) mapContainer;
        default:
            throw new EsHadoopIllegalStateException("Could not correctly wrap JSON data for structural type " + structTypeInfo.getCategory());
    }
}
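When es.output.json is enabled the document comes back as raw JSON text, so for STRUCT-shaped Hive tables the blob is wrapped in a single-entry MapWritable keyed by the configured JSON field name; any other structural category is rejected outright.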
Code example source: elastic/elasticsearch-hadoop

@Override
public void convert(Object from, BytesArray to) {
    // expect a tuple holding one field - chararray or bytearray
    Assert.isTrue(from instanceof SinkCall,
            String.format("Unexpected object type, expecting [%s], given [%s]", SinkCall.class, from.getClass()));
    // handle common cases
    SinkCall sinkCall = (SinkCall) from;
    Tuple rawTuple = sinkCall.getOutgoingEntry().getTuple();
    if (rawTuple == null || rawTuple.isEmpty()) {
        to.bytes("{}");
        return;
    }
    Assert.isTrue(rawTuple.size() == 1, "When using JSON input, only one field is expected");
    // postpone the coercion
    Tuple tuple = CascadingUtils.coerceToString(sinkCall);
    super.convert(tuple.getObject(0), to);
}
}
Code example source: elastic/elasticsearch-hadoop

public RestRepository(Settings settings) {
    this.settings = settings;
    this.resources = new Resources(settings);

    // Check if we have a read resource first, and if not, THEN check the write resource.
    // The write resource has stricter parsing rules, and if the process is only reading
    // with a resource that isn't valid for writing, then eagerly parsing it as a
    // write resource can erroneously throw an error. Instead, we should just get the
    // write resource lazily as needed.
    Assert.isTrue(resources.getResourceRead() != null || resources.getResourceWrite() != null, "Invalid configuration - No read or write resource specified");

    this.client = new RestClient(settings);
}
Code example source: elastic/elasticsearch-hadoop

@Override
protected Object extractField(Object target) {
    List<String> flNames = fieldNames;
    for (int i = 0; i < flNames.size(); i++) {
        String fl = flNames.get(i);
        if (target instanceof HiveType) {
            HiveType type = (HiveType) target;
            ObjectInspector inspector = type.getObjectInspector();
            if (inspector instanceof StructObjectInspector) {
                StructObjectInspector soi = (StructObjectInspector) inspector;
                StructField field = soi.getStructFieldRef(fl);
                ObjectInspector foi = field.getFieldObjectInspector();
                Assert.isTrue(foi.getCategory() == ObjectInspector.Category.PRIMITIVE,
                        String.format("Field [%s] needs to be a primitive; found [%s]", fl, foi.getTypeName()));
                // expecting a writable - simply do a toString
                target = soi.getStructFieldData(type.getObject(), field);
            }
            else {
                return FieldExtractor.NOT_FOUND;
            }
        }
        else {
            return FieldExtractor.NOT_FOUND;
        }
    }
    if (target == null || target instanceof NullWritable) {
        return StringUtils.EMPTY;
    }
    return target.toString();
}
Code example source: elastic/elasticsearch-hadoop

HeartBeat(final Progressable progressable, Configuration cfg, TimeValue lead, final Log log) {
    Assert.notNull(progressable, "a valid progressable is required to report status to Hadoop");
    TimeValue tv = HadoopCfgUtils.getTaskTimeout(cfg);
    Assert.isTrue(tv.getSeconds() <= 0 || tv.getSeconds() > lead.getSeconds(), "Hadoop timeout is shorter than the heartbeat");
    this.progressable = progressable;
    long cfgMillis = (tv.getMillis() > 0 ? tv.getMillis() : 0);
    // schedule the heartbeat at (timeout - lead), i.e. right before the task would time out
    this.delay = new TimeValue(Math.abs(cfgMillis - lead.getMillis()), TimeUnit.MILLISECONDS);
    this.log = log;

    String taskId;
    TaskID taskID = HadoopCfgUtils.getTaskID(cfg);
    if (taskID == null) {
        log.warn("Cannot determine task id...");
        taskId = "<unknown>";
        if (log.isTraceEnabled()) {
            log.trace("Current configuration is " + HadoopCfgUtils.asProperties(cfg));
        }
    }
    else {
        taskId = "" + taskID;
    }
    id = taskId;
}
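Concretely: with a configured task timeout of 600 s and a lead of 60 s, the first heartbeat is scheduled 540 s in; with no configured timeout (cfgMillis == 0), the Math.abs() turns the delay into the lead time itself.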
Code example source: elastic/elasticsearch-hadoop

/**
 * Writes the objects to index.
 *
 * @param ba The data as a bytes array
 */
public void writeProcessedToIndex(BytesArray ba) {
    Assert.notNull(ba, "no data given");
    Assert.isTrue(ba.length() > 0, "no data given");
    lazyInitWriting();
    trivialBytesRef.reset();
    trivialBytesRef.add(ba);
    doWriteToIndex(trivialBytesRef);
}
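Note how Assert.notNull() and Assert.isTrue() are paired here: the first rejects a missing buffer, the second an empty one, so a caller cannot slip a zero-length payload past the guard and into the bulk writer.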