This article collects a number of Java code examples for the org.apache.lucene.analysis.Token.<init>() method and shows how Token.<init>() is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the Token.<init>() method are as follows:
Package: org.apache.lucene.analysis.Token
Class: Token
Method: <init>
Constructs a Token with null text.
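Before the project examples, here is a minimal, self-contained sketch of the constructor overloads that appear below (hypothetical demo code, assuming the Lucene 3.x-era Token API; Token was deprecated and eventually removed in newer Lucene versions):

import org.apache.lucene.analysis.Token;

public class TokenDemo {
    public static void main(String[] args) {
        // No-arg constructor: a Token with null text, to be filled in later.
        Token reusable = new Token();
        reusable.setTermBuffer("hello".toCharArray(), 0, 5);

        // Term text plus start/end character offsets.
        Token withOffsets = new Token("world", 6, 11);

        // Term text, offsets, and a token type string.
        Token typed = new Token("world", 6, 11, "SYNONYM");

        System.out.println(reusable.term() + " " + withOffsets.term() + " " + typed.type());
    }
}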
Code example source: org.dspace.dependencies.solr/dspace-solr-core
/**
* Constructor
*
* @param input must be a CommonGramsFilter!
*
*/
public CommonGramsQueryFilter(CommonGramsFilter input) {
super(input);
prev = new Token();
}
Code example source: org.dspace.dependencies.solr/dspace-solr-core
/** Produces a List<Token> from a List<String> */
public static List<Token> makeTokens(List<String> strings) {
List<Token> ret = new ArrayList<Token>(strings.size());
for (String str : strings) {
//Token newTok = new Token(str,0,0,"SYNONYM");
Token newTok = new Token(0,0,"SYNONYM");
newTok.setTermBuffer(str.toCharArray(), 0, str.length());
ret.add(newTok);
}
return ret;
}
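A hypothetical call to the helper above (illustrative only; the string list is arbitrary and java.util.Arrays is assumed to be imported):

// Each resulting Token carries the type "SYNONYM" and zero offsets,
// as set inside makeTokens.
List<Token> syns = makeTokens(Arrays.asList("quick", "fast", "rapid"));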
Code example source: gncloud/fastcatsearch
@Override
public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
return attClass.isAssignableFrom(Token.class)
? new Token() : delegate.createAttributeInstance(attClass);
}
Code example source: org.dspace.dependencies.solr/dspace-solr-core
public void reset() throws IOException {
super.reset();
prev = new Token();
}
Code example source: org.apache.lucene/lucene-core-jfrog
/** Makes a clone, but replaces the term buffer &
* start/end offset in the process. This is more
* efficient than doing a full clone (and then calling
* setTermBuffer) because it saves a wasted copy of the old
* termBuffer. */
public Token clone(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset) {
final Token t = new Token(newTermBuffer, newTermOffset, newTermLength, newStartOffset, newEndOffset);
t.positionIncrement = positionIncrement;
t.flags = flags;
t.type = type;
if (payload != null)
t.payload = (Payload) payload.clone();
return t;
}
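For context, a hedged usage sketch of this clone variant (the token variable original is illustrative, not from the source):

// Clone an existing token but substitute a new term buffer; position
// increment, flags, type, and payload are carried over by clone().
char[] stemmed = "run".toCharArray();
Token stemmedToken = original.clone(stemmed, 0, stemmed.length,
    original.startOffset(), original.endOffset());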
Code example source: gncloud/fastcatsearch
/** Makes a clone, but replaces the term buffer &
* start/end offset in the process. This is more
* efficient than doing a full clone (and then calling
* {@link #copyBuffer}) because it saves a wasted copy of the old
* termBuffer. */
public Token clone(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset) {
final Token t = new Token(newTermBuffer, newTermOffset, newTermLength, newStartOffset, newEndOffset);
t.positionIncrement = positionIncrement;
t.flags = flags;
t.type = type;
if (payload != null)
t.payload = payload.clone();
return t;
}
Code example source: org.compass-project/compass
public AllTermsCache(CompassSettings settings, CompassMapping mapping) {
for (ResourceMapping resourceMapping : mapping.getRootMappings()) {
if (resourceMapping.getAllMapping().isExcludeAlias()) {
aliasTokensPerRootAlias.put(resourceMapping.getAlias(), new Token[0]);
} else {
List<Token> aliasTokens = new ArrayList<Token>();
aliasTokens.add(new Token(resourceMapping.getAlias().toLowerCase(), 0, resourceMapping.getAlias().length()));
for (String extendedAlias : resourceMapping.getExtendedAliases()) {
aliasTokens.add(new Token(extendedAlias.toLowerCase(), 0, extendedAlias.length()));
}
aliasTokensPerRootAlias.put(resourceMapping.getAlias(), aliasTokens.toArray(new Token[aliasTokens.size()]));
}
}
}
Code example source: org.dspace.dependencies.solr/dspace-solr-core
/** Construct a compound token. */
private Token gramToken(Token first, Token second) {
buffer.setLength(0);
buffer.append(first.termText());
buffer.append(SEPARATOR);
buffer.append(second.termText());
Token result = new Token(buffer.toString(), first.startOffset(), second.endOffset(), "gram");
result.setPositionIncrement(0);
return result;
}
Code example source: DiceTechJobs/SolrPlugins
private Token newToken(Token existing, String newText){
return new Token(newText, existing.startOffset(), existing.endOffset());
}
Code example source: org.apache.lucene/lucene-core-jfrog
public QueryTermVector(String queryString, Analyzer analyzer) {
if (analyzer != null)
{
TokenStream stream = analyzer.tokenStream("", new StringReader(queryString));
if (stream != null)
{
List terms = new ArrayList();
try {
final Token reusableToken = new Token();
for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
terms.add(nextToken.term());
}
processTerms((String[])terms.toArray(new String[terms.size()]));
} catch (IOException e) {
}
}
}
}
Code example source: ajermakovics/eclipse-instasearch
private void splitIntoTokens()
{
String term = termAtt.term();
String[] termParts = splitTerm(term);
if(termParts.length > 1)
{
int termPos = offsetAtt.startOffset();
for (int i = 0; i < termParts.length; i++)
{
String termPart = termParts[i];
int termPartPos = termPos + term.indexOf(termPart);
int termPartEndPos = termPartPos + termPart.length();
Token newToken = new Token(termPart, termPartPos, termPartEndPos);
newToken.setPositionIncrement(0); // in the same position
tokens.add( newToken );
}
}
}
Code example source: hibernate/hibernate-search
public static Token[] tokensFromAnalysis(Analyzer analyzer, String field, String text) throws IOException {
final List<Token> tokenList = new ArrayList<Token>();
final TokenStream stream = analyzer.tokenStream( field, new StringReader( text ) );
try {
CharTermAttribute term = stream.addAttribute( CharTermAttribute.class );
stream.reset();
while ( stream.incrementToken() ) {
Token token = new Token();
token.copyBuffer( term.buffer(), 0, term.length() );
tokenList.add( token );
}
stream.end();
}
finally {
stream.close();
}
return tokenList.toArray( new Token[tokenList.size()] );
}
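A hypothetical call to this helper (assuming analyzer is any Lucene Analyzer instance; the field name and text are illustrative):

Token[] tokens = tokensFromAnalysis(analyzer, "contents", "The quick brown fox");
for (Token t : tokens) {
    // buffer()/length() come from the CharTermAttribute API that
    // Token implements in this Lucene version.
    System.out.println(new String(t.buffer(), 0, t.length()));
}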
Code example source: org.compass-project/compass
private void addAliasesToStack(Token token) {
String[] synonyms = synonymLookupProvider.lookupSynonyms(token.termText());
if (synonyms == null) {
return;
}
for (int i = 0; i < synonyms.length; i++) {
Token synToken = new Token(synonyms[i], token.startOffset(), token.endOffset(), TOKEN_TYPE_SYNONYM);
synToken.setPositionIncrement(0);
synonymStack.addFirst(synToken);
}
}
Code example source: lucene/lucene
public final Token next() throws java.io.IOException
{
Token t = input.next();
if (t == null)
return null;
String txt = t.termText();
char[] chArray = txt.toCharArray();
for (int i = 0; i < chArray.length; i++)
{
chArray[i] = RussianCharsets.toLowerCase(chArray[i], charset);
}
String newTxt = new String(chArray);
// create new token
Token newToken = new Token(newTxt, t.startOffset(), t.endOffset());
return newToken;
}
Code example source: org.apache.lucene/lucene-core-jfrog
/** Returns the next token in the stream, or null at EOS.
* @deprecated The returned Token is a "full private copy" (not
* re-used across calls to next()) but will be slower
* than calling {@link #next(Token)} instead. */
public Token next() throws IOException {
final Token reusableToken = new Token();
Token nextToken = next(reusableToken);
if (nextToken != null) {
Payload p = nextToken.getPayload();
if (p != null) {
nextToken.setPayload((Payload) p.clone());
}
}
return nextToken;
}
Code example source: lucene/lucene
/**
* @return Returns the next token in the stream, or null at EOS
*/
public final Token next() throws IOException
{
if ((token = input.next()) == null)
{
return null;
}
else
{
String s = stemmer.stem(token.termText());
if (!s.equals(token.termText()))
{
return new Token(s, token.startOffset(), token.endOffset(),
token.type());
}
return token;
}
}