本文整理了Java中org.apache.lucene.analysis.Token.termText()
方法的一些代码示例,展示了Token.termText()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Token.termText()
方法的具体详情如下:
包路径:org.apache.lucene.analysis.Token
类名称:Token
方法名:termText
暂无
代码示例来源:origin: riotfamily/riot
/**
 * Builds a prefix query for a single analyzed token on the given field.
 *
 * @param field the index field to search against
 * @param token the analyzed token whose text becomes the query prefix
 * @return a {@link PrefixQuery} matching terms that start with the token text
 */
protected Query createFieldTokenQuery(String field, Token token) {
    Term prefixTerm = new Term(field, token.termText());
    return new PrefixQuery(prefixTerm);
}
代码示例来源:origin: com.atlassian.studio/studio-theme-jira-plugin
/**
 * Tokenizes the query string and collects the distinct term texts that
 * should be highlighted.
 *
 * @param query the raw query string to analyze
 * @return the set of distinct token texts, or an empty set if the query
 *         could not be tokenized
 */
private static Set getHighlightTerms(String query)
{
    final Token[] tokens;
    try
    {
        tokens = parseText(query);
    }
    catch (final IOException e)
    {
        // A tokenization failure just means nothing gets highlighted;
        // deliberately fall back to an empty set instead of propagating.
        return Collections.emptySet();
    }
    // Presize to the token count; duplicates only shrink the result.
    final Set<String> ret = new HashSet<String>(tokens.length);
    for (final Token token : tokens)
    {
        ret.add(token.termText());
    }
    return ret;
}
代码示例来源:origin: org.dspace.dependencies.solr/dspace-solr-core
/** Construct a compound token. */
/** Builds a compound "gram" token that spans two adjacent tokens. */
private Token gramToken(Token first, Token second) {
    // Reuse the shared buffer to assemble "<first><SEPARATOR><second>".
    buffer.setLength(0);
    buffer.append(first.termText()).append(SEPARATOR).append(second.termText());
    Token gram = new Token(buffer.toString(), first.startOffset(),
        second.endOffset(), "gram");
    // The gram overlays its first constituent, so it adds no new position.
    gram.setPositionIncrement(0);
    return gram;
}
代码示例来源:origin: lucene/lucene
/**
 * Analyzes the given query string and records the resulting term texts.
 * Does nothing when the analyzer is null or produces no token stream.
 *
 * @param queryString the query text to analyze
 * @param analyzer    the analyzer used to tokenize the query; may be null
 */
public QueryTermVector(String queryString, Analyzer analyzer) {
    if (analyzer != null)
    {
        TokenStream stream = analyzer.tokenStream("", new StringReader(queryString));
        if (stream != null)
        {
            Token next = null;
            List<String> terms = new ArrayList<String>();
            try {
                while ((next = stream.next()) != null)
                {
                    terms.add(next.termText());
                }
                processTerms(terms.toArray(new String[terms.size()]));
            } catch (IOException ignored) {
                // Best-effort: an analysis failure leaves the vector empty,
                // preserving the original silent-fallback behavior.
            }
        }
    }
}
代码示例来源:origin: lucene/lucene
/**
* @return Returns the next token in the stream, or null at EOS
*/
/**
 * Returns the next token in the stream, or null at EOS. Tokens listed in
 * the exclusion table are passed through unchanged; all others are
 * replaced by a stemmed token when stemming alters their text.
 */
public final Token next()
    throws IOException
{
    token = input.next();
    if ( token == null ) {
        return null;
    }
    String text = token.termText();
    // Terms in the exclusion table bypass stemming entirely.
    if ( exclusionSet != null && exclusionSet.contains( text ) ) {
        return token;
    }
    String stemmed = stemmer.stem( text );
    // If not stemmed, don't waste the time creating a new token.
    if ( stemmed.equals( text ) ) {
        return token;
    }
    return new Token( stemmed, token.startOffset(),
        token.endOffset(), token.type() );
}
代码示例来源:origin: com.atlassian.studio/studio-theme-jira-plugin
while (token != null)
tokens.add(token.termText());
token = tokenStream.next();
代码示例来源:origin: lucene/lucene
v.addElement(t.termText());
代码示例来源:origin: lucene/lucene
/**
* @return Returns the next token in the stream, or null at EOS
*/
/**
 * Returns the next token in the stream, or null at EOS, substituting the
 * stemmed form of each token's text whenever stemming changes it.
 */
public final Token next() throws IOException
{
    token = input.next();
    if (token == null)
    {
        return null;
    }
    String original = token.termText();
    String stemmed = stemmer.stem(original);
    // Only allocate a replacement token when the stem actually differs.
    return stemmed.equals(original)
        ? token
        : new Token(stemmed, token.startOffset(), token.endOffset(),
            token.type());
}
代码示例来源:origin: com.atlassian.studio/studio-theme-jira-plugin
for (final Token token : tokens)
if (highlightTerms.contains(token.termText()))
代码示例来源:origin: org.compass-project/compass
/**
 * Pushes one synonym token onto the synonym stack for each known synonym
 * of the given token; does nothing when no synonyms are found.
 */
private void addAliasesToStack(Token token) {
    String[] synonyms = synonymLookupProvider.lookupSynonyms(token.termText());
    if (synonyms == null) {
        return;
    }
    for (String synonym : synonyms) {
        // Each synonym shares the source token's offsets and occupies the
        // same position (increment 0), overlaying the original term.
        Token alias = new Token(synonym, token.startOffset(), token.endOffset(), TOKEN_TYPE_SYNONYM);
        alias.setPositionIncrement(0);
        synonymStack.addFirst(alias);
    }
}
}
代码示例来源:origin: lucene/lucene
return null;
String text = t.termText();
String type = t.type();
代码示例来源:origin: lucene/lucene
/**
 * Returns the next token with its text lower-cased via the Russian
 * charset-specific mapping, or null at EOS.
 */
public final Token next() throws java.io.IOException
{
    Token t = input.next();
    if (t == null)
    {
        return null;
    }
    // Lower-case each character in place using the configured charset table.
    char[] chars = t.termText().toCharArray();
    for (int i = 0; i < chars.length; i++)
    {
        chars[i] = RussianCharsets.toLowerCase(chars[i], charset);
    }
    // Emit a fresh token carrying the lower-cased text and original offsets.
    return new Token(new String(chars), t.startOffset(), t.endOffset());
}
}
代码示例来源:origin: com.atlassian.studio/studio-theme-jira-plugin
tokens.add(token.termText());
代码示例来源:origin: org.compass-project/compass
Token offsetToken = new Token(nextToken.termText(),
nextToken.startOffset() + curOffset,
nextToken.endOffset() + curOffset);
代码示例来源:origin: com.atlassian.studio/studio-theme-jira-plugin
final StringBuffer buf = new StringBuffer();
for (int i = 0; i < tokens.length; i++) {
buf.append(tokens[i].termText());
if (i != (tokens.length - 1))
buf.append(", ");
if (containsSearchTerm(tokens[i].termText(), highlight)) {
if (containsSearchTerm(t.termText(), highlight)) {
excerpt.addToken(t.termText());
excerpt.add(new Summary.Fragment(text.substring(offset, t.startOffset())));
excerpt.add(new Summary.Highlight(text.substring(t.startOffset(), t.endOffset())));
代码示例来源:origin: lucene/lucene
for (Token t = stream.next(); t != null; t = stream.next()) {
position += (t.getPositionIncrement() - 1);
addPosition(fieldName, t.termText(), position++);
if (++length > maxFieldLength) break;
内容来源于网络,如有侵权,请联系作者删除!