本文整理了Java中org.apache.lucene.index.Term.text()
方法的一些代码示例,展示了Term.text()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Term.text()
方法的具体详情如下:
包路径:org.apache.lucene.index.Term
类名称:Term
方法名:text
[英]Returns the text of this term. In the case of words, this is simply the text of the word. In the case of dates and other types, this is an encoding of the object as a string.
[中]返回此术语的文本。就单词而言,这只是单词的文本。对于日期和其他类型,这是将对象编码为字符串。
代码示例来源:origin: org.apache.lucene/lucene-core
/** Prints a user-readable version of this query. */
@Override
public String toString(String field) {
    // Prefix with "field:" only when this term belongs to a different
    // field than the one the query is being rendered against.
    StringBuilder sb = new StringBuilder();
    if (!term.field().equals(field)) {
        sb.append(term.field()).append(":");
    }
    return sb.append(term.text()).toString();
}
代码示例来源:origin: org.apache.lucene/lucene-core
/**
 * Prints a user-readable version of this fuzzy query, e.g. {@code field:text~2}.
 *
 * @param field the field this query is rendered against; the term's own
 *              field is prefixed only when it differs from this one
 * @return the human-readable query string
 */
@Override
public String toString(String field) {
    final StringBuilder buffer = new StringBuilder();
    if (!term.field().equals(field)) {
        buffer.append(term.field());
        buffer.append(":");
    }
    buffer.append(term.text());
    buffer.append('~');
    // StringBuilder has an append(int) overload; Integer.toString() only
    // created a throwaway intermediate String.
    buffer.append(maxEdits);
    return buffer.toString();
}
代码示例来源:origin: org.apache.lucene/lucene-core
/** Prints a user-readable version of this query. */
@Override
public String toString(String field) {
    // The regexp term is rendered between slashes, e.g. field:/pattern/.
    StringBuilder sb = new StringBuilder();
    if (!term.field().equals(field)) {
        sb.append(term.field()).append(":");
    }
    return sb.append('/').append(term.text()).append('/').toString();
}
}
代码示例来源:origin: org.apache.lucene/lucene-core
/** Prints a user-readable version of this query. */
@Override
public String toString(String field) {
    // Same field: just the term text; otherwise the term's own
    // "field:text" rendering.
    return term.field().equals(field) ? term.text() : term.toString();
}
代码示例来源:origin: oracle/opengrok
/**
 * Splits the term's text on tabs/spaces and collects spelling
 * suggestions for each token into {@code result}.
 */
private void getSuggestion(Term term, IndexReader ir,
        List<String> result) throws IOException {
    if (term == null) {
        return;
    }
    //TODO below seems to be case insensitive ... for refs/defs this is bad
    for (String token : TABSPACE.split(term.text(), 0)) {
        SuggestWord[] suggestions = checker.suggestSimilar(
                new Term(term.field(), token),
                SPELLCHECK_SUGGEST_WORD_COUNT, ir, SuggestMode.SUGGEST_ALWAYS);
        for (SuggestWord suggestion : suggestions) {
            result.add(suggestion.string);
        }
    }
}
代码示例来源:origin: tjake/Solandra
/**
 * Builds the column name for a (field, text) pair as the UTF-8 bytes of
 * {@code field + delimeter + text}.
 *
 * @param field index field name; "" denotes the "all terms" case
 * @param text  term text; {@code null} also denotes the "all terms" case
 * @return UTF-8 encoded column name, or just the delimiter bytes for "all terms"
 */
public static byte[] createColumnName(String field, String text)
{
    // case of all terms
    if (field.equals("") || text == null)
        return delimeterBytes;
    // getBytes(Charset) never throws UnsupportedEncodingException, so the
    // old try/catch around getBytes("UTF-8") is unnecessary — UTF-8 is
    // guaranteed to be available on every JVM.
    return (field + delimeter + text).getBytes(java.nio.charset.StandardCharsets.UTF_8);
}
代码示例来源:origin: oracle/opengrok
/**
 * Increments search count for {@code term} by {@code value}.
 * Best-effort: silently does nothing while a rebuild holds the write lock,
 * or when the term is not known to the lookup data.
 *
 * @param term term for which to increment search count; must not be null
 * @param value value to increment by
 * @throws IllegalArgumentException if {@code term} is null
 */
public void incrementSearchCount(final Term term, final int value) {
    if (term == null) {
        throw new IllegalArgumentException("Cannot increment search count for null");
    }
    // tryLock rather than lock: if a rebuild currently holds the write
    // lock, drop the update instead of blocking the caller.
    boolean gotLock = lock.readLock().tryLock();
    if (!gotLock) { // do not wait for rebuild
        return;
    }
    try {
        // NOTE(review): assumes lookups always has an entry for
        // term.field() — lookups.get(...) returning null would NPE here;
        // confirm against how lookups is populated.
        if (lookups.get(term.field()).get(term.text()) == null) {
            return; // unknown term
        }
        // Per-field popularity map may legitimately be absent.
        PopularityMap map = searchCountMaps.get(term.field());
        if (map != null) {
            map.increment(term.bytes(), value);
        }
    } finally {
        lock.readLock().unlock();
    }
}
代码示例来源:origin: tjake/Solandra
/**
 * Loads term info for {@code term} restricted to the given document keys.
 *
 * @param term    term whose postings row is read
 * @param docNums column names (document keys) to slice from the row
 * @return term info for the matching documents, or {@code null} if the
 *         row is absent or empty
 * @throws IOException on read failure
 */
public LucandraTermInfo[] loadFilteredTerms(Term term, List<ByteBuffer> docNums) throws IOException
{
    long start = System.currentTimeMillis();
    ColumnParent parent = new ColumnParent();
    parent.setColumn_family(CassandraUtils.termVecColumnFamily);
    // Row key is indexName|field|text, all UTF-8 encoded.
    ByteBuffer key;
    try
    {
        key = CassandraUtils.hashKeyBytes(indexName.getBytes("UTF-8"), CassandraUtils.delimeterBytes, term.field()
                .getBytes("UTF-8"), CassandraUtils.delimeterBytes, term.text().getBytes("UTF-8"));
    }
    catch (UnsupportedEncodingException e2)
    {
        throw new RuntimeException("JVM doesn't support UTF-8", e2);
    }
    ReadCommand rc = new SliceByNamesReadCommand(CassandraUtils.keySpace, key, parent, docNums);
    List<Row> rows = CassandraUtils.robustRead(CassandraUtils.consistency, rc);
    LucandraTermInfo[] termInfo = null;
    if (rows != null && rows.size() > 0 && rows.get(0) != null && rows.get(0).cf != null)
    {
        termInfo = TermCache.convertTermInfo(rows.get(0).cf.getSortedColumns());
    }
    long end = System.currentTimeMillis();
    if (logger.isDebugEnabled())
        // BUG FIX: '+' binds tighter than '?:', so the original condition
        // parsed as ("..." + termInfo) == null — always false — and then
        // dereferenced termInfo.length even when termInfo was null (NPE),
        // besides producing a garbled message. The ternary must be
        // parenthesized.
        logger.debug("loadFilterdTerms: " + term + "(" + (termInfo == null ? 0 : termInfo.length) + ") took "
                + (end - start) + "ms");
    return termInfo;
}
代码示例来源:origin: tjake/Solandra
/**
 * Deletes every document indexed under {@code term} in {@code indexName}:
 * reads the term's full postings row, then removes each referenced
 * document by its id.
 */
public void deleteDocuments(String indexName, Term term, boolean autoCommit) throws CorruptIndexException,
        IOException
{
    ColumnParent termVecParent = new ColumnParent(CassandraUtils.termVecColumnFamily);
    // Row key is indexName|field|text, all UTF-8 encoded.
    ByteBuffer rowKey = CassandraUtils.hashKeyBytes(indexName.getBytes("UTF-8"), CassandraUtils.delimeterBytes,
            term.field().getBytes("UTF-8"), CassandraUtils.delimeterBytes, term.text().getBytes("UTF-8"));
    // Full-row slice: every column names a document containing the term.
    ReadCommand readCommand = new SliceFromReadCommand(CassandraUtils.keySpace, rowKey, termVecParent,
            ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE);
    List<Row> termRows = CassandraUtils.robustRead(CassandraUtils.consistency, readCommand);
    // delete by documentId
    for (Row termRow : termRows)
    {
        if (termRow.cf == null)
            continue;
        for (IColumn column : termRow.cf.getSortedColumns())
        {
            deleteLucandraDocument(indexName, CassandraUtils.readVInt(column.name()), autoCommit);
        }
    }
}
代码示例来源:origin: tjake/Solandra
logger.debug("Found " + columns.size() + " terms under field " + startTerm.field());
.field().getBytes("UTF-8"), CassandraUtils.delimeterBytes, term.text().getBytes("UTF-8"));
代码示例来源:origin: tjake/Solandra
Term term = new Term(field.name(), termAttribute.toString());
ThriftTerm tterm = new ThriftTerm(term.field()).setText(
ByteBuffer.wrap(term.text().getBytes("UTF-8"))).setIs_binary(false);
.getKey().field().getBytes("UTF-8"), CassandraUtils.delimeterBytes, term.getKey().text()
.getBytes("UTF-8"));
代码示例来源:origin: org.apache.lucene/lucene-core
/**
 * Creates the Spans for this term in the given leaf reader, or returns
 * {@code null} when the term does not occur in that segment (or the
 * field has no terms there).
 *
 * @throws IllegalStateException if the field was indexed without positions
 */
@Override
public Spans getSpans(final LeafReaderContext context, Postings requiredPostings) throws IOException {
    assert termContext.wasBuiltFor(ReaderUtil.getTopLevelContext(context)) : "The top-reader used to create Weight is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
    // Per-segment term state captured when the Weight was built.
    final TermState state = termContext.get(context.ord);
    if (state == null) { // term is not present in that reader
        assert context.reader().docFreq(term) == 0 : "no termstate found but term exists in reader term=" + term;
        return null;
    }
    final Terms terms = context.reader().terms(term.field());
    if (terms == null)
        return null;
    // Span matching needs per-position data; fail loudly if it was not indexed.
    if (terms.hasPositions() == false)
        throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run SpanTermQuery (term=" + term.text() + ")");
    final TermsEnum termsEnum = terms.iterator();
    // Seek directly with the cached state — avoids a second term lookup.
    termsEnum.seekExact(term.bytes(), state);
    final PostingsEnum postings = termsEnum.postings(null, requiredPostings.getRequiredPostings());
    float positionsCost = termPositionsCost(termsEnum) * PHRASE_TO_SPAN_TERM_POSITIONS_COST;
    return new TermSpans(getSimScorer(context), postings, term, positionsCost);
}
}
代码示例来源:origin: tjake/Solandra
if (term == null || term.field() != field)
break;
byte termval = parser.parseByte(term.text());
termDocs.seek(termEnum);
while (termDocs.next())
代码示例来源:origin: tjake/Solandra
if (term == null || term.field() != field)
break;
short termval = parser.parseShort(term.text());
termDocs.seek(termEnum);
while (termDocs.next())
代码示例来源:origin: tjake/Solandra
if (term == null || term.field() != field)
break;
int termval = parser.parseInt(term.text());
if (retArray == null) // late init
retArray = new int[reader.maxDoc()];
代码示例来源:origin: tjake/Solandra
if (term == null || term.field() != field)
break;
float termval = parser.parseFloat(term.text());
if (retArray == null) // late init
retArray = new float[reader.maxDoc()];
代码示例来源:origin: org.elasticsearch/elasticsearch
/** Rough in-memory footprint of this uid term, in bytes. */
@Override
public long estimateSize() {
    // UTF-16: two bytes per char of field + text, plus ~20 bytes of
    // fixed object overhead.
    int totalChars = uid.field().length() + uid.text().length();
    return totalChars * 2 + 20;
}
代码示例来源:origin: linkedin/indextank-engine
/**
 * Converts a Lucene phrase-query term array into a SimplePhraseQuery.
 * All terms of a phrase share one field, so the first term's field is
 * used for the whole phrase.
 */
private QueryNode internalParsePhraseQuery(Term[] terms, int[] positions, final String originalStr) {
    Preconditions.checkArgument(terms.length > 0, "too few terms to build a phrase query");
    String[] texts = new String[terms.length];
    int i = 0;
    for (Term term : terms) {
        texts[i++] = term.text();
    }
    return new SimplePhraseQuery(terms[0].field(), texts, positions);
}
代码示例来源:origin: org.elasticsearch/elasticsearch
/** Rough in-memory footprint of this uid term, in bytes. */
@Override
public int estimatedSizeInBytes() {
    // UTF-16: two bytes per char of field + text, plus ~20 bytes of
    // fixed object overhead.
    int totalChars = uid().field().length() + uid().text().length();
    return totalChars * 2 + 20;
}
代码示例来源:origin: linkedin/indextank-engine
/**
 * Translates a parsed Lucene query into the engine's own QueryNode tree,
 * carrying the Lucene boost over to the result.
 *
 * @throws ParseException for empty boolean queries or unsupported query types
 */
private QueryNode internalParse(org.apache.lucene.search.Query luceneQuery, final String originalStr) throws ParseException {
    final QueryNode parsed;
    if (luceneQuery instanceof org.apache.lucene.search.TermQuery) {
        Term term = ((org.apache.lucene.search.TermQuery) luceneQuery).getTerm();
        parsed = new TermQuery(term.field(), term.text());
    } else if (luceneQuery instanceof org.apache.lucene.search.PrefixQuery) {
        Term prefix = ((org.apache.lucene.search.PrefixQuery) luceneQuery).getPrefix();
        parsed = new PrefixTermQuery(prefix.field(), prefix.text());
    } else if (luceneQuery instanceof org.apache.lucene.search.BooleanQuery) {
        List<BooleanClause> clauses = ((org.apache.lucene.search.BooleanQuery) luceneQuery).clauses();
        if (clauses.isEmpty()) {
            throw new ParseException("error parsing: " + originalStr);
        }
        parsed = internalParseBooleanQuery(clauses, originalStr);
    } else if (luceneQuery instanceof org.apache.lucene.search.PhraseQuery) {
        org.apache.lucene.search.PhraseQuery phraseQuery = (org.apache.lucene.search.PhraseQuery) luceneQuery;
        parsed = internalParsePhraseQuery(phraseQuery.getTerms(), phraseQuery.getPositions(), originalStr);
    } else {
        throw new ParseException("unimplemented");
    }
    parsed.setBoost(luceneQuery.getBoost());
    return parsed;
}
内容来源于网络,如有侵权,请联系作者删除!