This article collects code examples for the Java method org.apache.lucene.analysis.Token.term() and shows how Token.term() is used in practice. The examples are drawn from curated projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as solid references. Details of Token.term() follow:
Package path: org.apache.lucene.analysis.Token
Class: Token
Method: term
Returns the Token's term text. This method has a performance penalty because the text is stored internally in a char[]; if possible, use #termBuffer() and #termLength() directly instead. If you really need a String, use this method, which is nothing more than a convenience call to new String(token.termBuffer(), 0, token.termLength()).
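As the description says, term() is just a convenience copy of the internal buffer. A minimal sketch of that equivalence, assuming a pre-3.0 Lucene where Token still exposes termBuffer()/termLength() (the demo class name is made up):

import org.apache.lucene.analysis.Token;

public class TermCopyDemo {
  public static void main(String[] args) {
    Token token = new Token();
    token.setTermBuffer("lucene"); // setTermBuffer(String) exists from Lucene 2.4 on

    // term() allocates a fresh String on every call...
    String viaTerm = token.term();
    // ...which is exactly this copy of the internal char[]:
    String viaBuffer = new String(token.termBuffer(), 0, token.termLength());

    System.out.println(viaTerm.equals(viaBuffer)); // prints: true
  }
}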
Code example source: org.apache.lucene/lucene-core-jfrog (the same snippet also appears in org.apache.lucene/com.springsource.org.apache.lucene)
public QueryTermVector(String queryString, Analyzer analyzer) {
  if (analyzer != null) {
    TokenStream stream = analyzer.tokenStream("", new StringReader(queryString));
    if (stream != null) {
      List terms = new ArrayList();
      try {
        // reuse one Token instance across the whole stream (pre-3.0 API)
        final Token reusableToken = new Token();
        for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
          terms.add(nextToken.term()); // copy the buffer out as a String
        }
        processTerms((String[]) terms.toArray(new String[terms.size()]));
      } catch (IOException e) {
        // ignored in the original: on failure, processTerms is simply never called
      }
    }
  }
}
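A hedged usage sketch for the constructor above, assuming Lucene 2.x where QueryTermVector lives in org.apache.lucene.search and StandardAnalyzer still has a no-argument constructor:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.search.QueryTermVector;

public class QueryTermVectorDemo {
  public static void main(String[] args) {
    QueryTermVector vector = new QueryTermVector("quick brown fox", new StandardAnalyzer());
    String[] terms = vector.getTerms();         // analyzed terms of the query
    int[] freqs = vector.getTermFrequencies();  // per-term counts, aligned with terms
    for (int i = 0; i < terms.length; i++) {
      System.out.println(terms[i] + " x" + freqs[i]);
    }
  }
}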
Code example source: stackoverflow.com
// Groovy snippet (note the closure and optional semicolons): a custom analyzer
// chaining StandardTokenizer -> LowerCaseFilter -> StopFilter -> Italian SnowballFilter.
public class NodesAnalyzer extends Analyzer {
  public TokenStream tokenStream(String fieldName, Reader reader) {
    Tokenizer tokenizer = new StandardTokenizer(reader)
    TokenFilter lowerCaseFilter = new LowerCaseFilter(tokenizer)
    TokenFilter stopFilter = new StopFilter(lowerCaseFilter, Data.stopWords.collect { it.text } as String[])
    SnowballFilter snowballFilter = new SnowballFilter(stopFilter, new org.tartarus.snowball.ext.ItalianStemmer())
    return snowballFilter
  }
}

// Driver: iterate the stream with the pre-2.9 next() API and read each term.
Analyzer analyzer = new NodesAnalyzer()
TokenStream ts = analyzer.tokenStream(null, new StringReader(str));
Token token = ts.next()
while (token != null) {
  String cur = token.term()
  token = ts.next();
}
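The ts.next() loop above is the pre-2.9 TokenStream API. A sketch of the same loop on the attribute API that replaced it, assuming Lucene 2.9/3.x where TermAttribute#term() returns the same text as Token#term() (TermAttribute was itself later replaced by CharTermAttribute):

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

public class AttributeLoopDemo {
  // Collects all terms without materializing Token objects.
  static List<String> terms(Analyzer analyzer, String str) throws IOException {
    TokenStream ts = analyzer.tokenStream(null, new StringReader(str));
    TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
    List<String> result = new ArrayList<String>();
    while (ts.incrementToken()) {
      result.add(termAtt.term()); // same text Token.term() would return
    }
    ts.close();
    return result;
  }
}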
Code example source: org.apache.lucene/com.springsource.org.apache.lucene (the same snippet also appears in org.apache.lucene/lucene-core-jfrog)
// Fragment (apparently from the query parser's getFieldQuery); code between
// the term() call sites is elided in the original and marked with "...".
else if (list.size() == 1) {
  // a single analyzed token collapses to a plain TermQuery
  nextToken = (org.apache.lucene.analysis.Token) list.get(0);
  return newTermQuery(new Term(field, nextToken.term()));
} else {
  if (severalTokensAtSamePosition) {
    // tokens at the same position (e.g. synonyms) are OR-ed together
    nextToken = (org.apache.lucene.analysis.Token) list.get(i);
    Query currentQuery = newTermQuery(new Term(field, nextToken.term()));
    q.add(currentQuery, BooleanClause.Occur.SHOULD);
    // ...
    multiTerms.add(new Term(field, nextToken.term()));
    // ...
    // consecutive tokens build a PhraseQuery; position gaps are kept if enabled
    if (enablePositionIncrements) {
      position += nextToken.getPositionIncrement();
      pq.add(new Term(field, nextToken.term()), position);
    } else {
      pq.add(new Term(field, nextToken.term()));
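The interesting part of this fragment is the position bookkeeping: stop-word removal leaves gaps that getPositionIncrement() reports, and the phrase query preserves them. A self-contained sketch of just that mechanism, with the field name and tokens invented for illustration:

import java.util.Arrays;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.PhraseQuery;

public class PhrasePositionsDemo {
  public static void main(String[] args) {
    // "quick [the] fox": the removed stop word shows up as an increment of 2
    Token quick = new Token(); quick.setTermBuffer("quick");
    Token fox = new Token(); fox.setTermBuffer("fox");
    fox.setPositionIncrement(2); // gap left by the elided stop word

    PhraseQuery pq = new PhraseQuery();
    int position = -1;
    for (Token t : Arrays.asList(quick, fox)) {
      position += t.getPositionIncrement(); // default increment is 1
      pq.add(new Term("body", t.term()), position);
    }
    System.out.println(pq); // roughly: body:"quick ? fox"
  }
}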
Code example source: org.dspace.dependencies.solr/dspace-solr-core
NamedList<Object> tokenNamedList = new SimpleOrderedMap<Object>();
// convert the indexed form back into something human-readable
String text = fieldType.indexedToReadable(token.term());
tokenNamedList.add("text", text);
if (!text.equals(token.term())) {
  // keep the raw indexed form when it differs from the readable one
  tokenNamedList.add("raw_text", token.term());
}
tokenNamedList.add("position", position);
if (context.getTermsToMatch().contains(token.term())) {
  // flag tokens that also occur in the analyzed query
  tokenNamedList.add("match", true);
}
Code example source: org.compass-project/compass
// Compass's variant of the same query-building logic: AllBoostingTermQuery
// replaces TermQuery when boostAll is set. Elided code is marked with "...".
nextToken = (org.apache.lucene.analysis.Token) list.get(0);
if (boostAll) {
  return new AllBoostingTermQuery(getTerm(field, nextToken.term()));
} else {
  return newTermQuery(getTerm(field, nextToken.term()));
}
// ...
if (boostAll) {
  AllBoostingTermQuery currentQuery = new AllBoostingTermQuery(
      getTerm(field, nextToken.term()));
  q.add(currentQuery, BooleanClause.Occur.SHOULD);
} else {
  Query currentQuery = newTermQuery(getTerm(field, nextToken.term()));
  q.add(currentQuery, BooleanClause.Occur.SHOULD);
}
// ...
multiTerms.add(getTerm(field, nextToken.term()));
// ...
if (enablePositionIncrements) {
  position += nextToken.getPositionIncrement();
  pq.add(getTerm(field, nextToken.term()), position);
} else {
  pq.add(getTerm(field, nextToken.term()));
Code example source: org.apache.stanbol/org.apache.stanbol.commons.solr.extras.paoding
// copy an old-style Token into the attribute-based stream's attributes
termAtt.setTermBuffer(token.term());
offsetAtt.setOffset(correctOffset(token.startOffset()),
    correctOffset(token.endOffset()));
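This fragment bridges the two APIs: an old-style Token produced by the paoding segmenter is copied into a new-style stream's attributes. A hedged sketch of what the surrounding incrementToken() might look like, assuming the class extends org.apache.lucene.analysis.Tokenizer (which provides correctOffset) and that nextPaodingToken() is a hypothetical helper standing in for the real segmenter call:

// termAtt/offsetAtt are assumed to have been obtained via addAttribute(...)
// in the constructor of this hypothetical Tokenizer subclass.
public boolean incrementToken() throws IOException {
  Token token = nextPaodingToken(); // hypothetical: fetch the next segmenter token
  if (token == null) {
    return false; // stream exhausted
  }
  clearAttributes();
  termAtt.setTermBuffer(token.term());
  offsetAtt.setOffset(correctOffset(token.startOffset()),
      correctOffset(token.endOffset()));
  return true;
}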
Code example source: org.dspace.dependencies.solr/dspace-solr-core
List<Token> tokens = analyzeValue(queryValue, fieldType.getQueryAnalyzer());
for (Token token : tokens) {
  termsToMatch.add(token.term()); // remember analyzed query terms for the "match" flag
}
Code example source: org.dspace.dependencies.solr/dspace-solr-core
List<Token> tokens = analyzeValue(request.getQuery(), fieldType.getQueryAnalyzer());
for (Token token : tokens) {
  termsToMatch.add(token.term());
}