This article collects code examples of the Java method org.apache.uima.cas.CAS.createFilteredIterator() and shows how CAS.createFilteredIterator() is used in practice. The examples are extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the CAS.createFilteredIterator() method:
Package path: org.apache.uima.cas.CAS
Class name: CAS
Method name: createFilteredIterator
Description: Create an iterator over structures satisfying a given constraint. Constraints are described in the javadocs for ConstraintFactory and related classes.
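The typical call pattern is to build an FSMatchConstraint with ConstraintFactory and pass it, together with a plain index iterator, to createFilteredIterator. The following minimal sketch illustrates that pattern; the type name "com.example.Token" and the feature "posTag" are hypothetical placeholders and do not come from the projects quoted below.

import org.apache.uima.cas.CAS;
import org.apache.uima.cas.ConstraintFactory;
import org.apache.uima.cas.FSIterator;
import org.apache.uima.cas.FSMatchConstraint;
import org.apache.uima.cas.FSStringConstraint;
import org.apache.uima.cas.FeaturePath;
import org.apache.uima.cas.Type;
import org.apache.uima.cas.text.AnnotationFS;

public class FilteredIteratorSketch {

  // Returns an iterator over "com.example.Token" annotations whose posTag feature equals "NN".
  public static FSIterator<AnnotationFS> nounTokens(CAS cas) {
    Type tokenType = cas.getTypeSystem().getType("com.example.Token"); // hypothetical type name
    ConstraintFactory cf = cas.getConstraintFactory();

    // Constrain the string feature "posTag" (hypothetical) to the value "NN".
    FSStringConstraint posConstraint = cf.createStringConstraint();
    posConstraint.equals("NN");

    // Embed the string constraint under the feature path token/posTag.
    FeaturePath posPath = cas.createFeaturePath();
    posPath.addFeature(tokenType.getFeatureByBaseName("posTag"));
    FSMatchConstraint matchConstraint = cf.embedConstraint(posPath, posConstraint);

    // createFilteredIterator wraps the plain index iterator and skips structures
    // that do not satisfy the constraint.
    return cas.createFilteredIterator(cas.getAnnotationIndex(tokenType).iterator(), matchConstraint);
  }
}

As the examples below show, any FSMatchConstraint can be used in the same way, including hand-written implementations such as ContainingConstraint or StrictTypeConstraint.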
Code example origin: apache/opennlp
ContainingConstraint containingConstraint = new ContainingConstraint(containerAnnotation);
Iterator<AnnotationFS> containingTokens = cas.createFilteredIterator(
allRemoveAnnotations.iterator(), containingConstraint);
Code example origin: apache/opennlp
protected void process(CAS cas, AnnotationFS sentenceAnnotation) {
FSIndex<AnnotationFS> allTokens = cas.getAnnotationIndex(mTokenType);
ContainingConstraint containingConstraint =
new ContainingConstraint(sentenceAnnotation);
String sentence = sentenceAnnotation.getCoveredText();
Iterator<AnnotationFS> containingTokens = cas.createFilteredIterator(
allTokens.iterator(), containingConstraint);
List<Span> tokenSpans = new LinkedList<>();
while (containingTokens.hasNext()) {
AnnotationFS token = containingTokens.next();
tokenSpans.add(new Span(token.getBegin() - sentenceAnnotation.getBegin(),
token.getEnd() - sentenceAnnotation.getBegin()));
}
ParseConverter converter = new ParseConverter(sentence, tokenSpans.toArray(new Span[tokenSpans.size()]));
Parse unparsedTree = converter.getParseForTagger();
if (unparsedTree.getChildCount() > 0) {
Parse parse = mParser.parse(unparsedTree);
// TODO: We need a strategy to handle the case that a full
// parse could not be found. What to do in this case?
parse = converter.transformParseFromTagger(parse);
if (mLogger.isLoggable(Level.INFO)) {
StringBuffer parseString = new StringBuffer();
parse.show(parseString);
mLogger.log(Level.INFO, parseString.toString());
}
createAnnotation(cas, sentenceAnnotation.getBegin(), parse);
}
}
Code example origin: CLLKazan/UIMA-Ext
public static <F extends FeatureStructure> FSIterator<F> filter(CAS cas,
FSIterator<F> srcIter, FSMatchConstraint... constraints) {
if (constraints.length == 0) {
return srcIter;
}
FSMatchConstraint resultConstr = and(constraints);
return cas.createFilteredIterator(srcIter, resultConstr);
}
Code example origin: CLLKazan/UIMA-Ext
/**
* {@inheritDoc}
*/
@Override
public FSIterator<AnnotationFS> extract(CAS cas) {
// TODO optimization point - get common ancestor type if any
FSIterator<AnnotationFS> allAnnoIter = cas.getAnnotationIndex().iterator();
return cas.createFilteredIterator(allAnnoIter, annoMatchConstraint);
}
Code example origin: Ailab403/ailab-mltk4j
private void process(CAS tcas, AnnotationFS sentence) {
FSIndex<AnnotationFS> allTokens = tcas.getAnnotationIndex(mTokenType);
ContainingConstraint containingConstraint =
new ContainingConstraint(sentence);
List<String> tokens = new ArrayList<String>();
List<String> tags = new ArrayList<String>();
Iterator<AnnotationFS> containingTokens = tcas.createFilteredIterator(
allTokens.iterator(), containingConstraint);
while (containingTokens.hasNext()) {
AnnotationFS tokenAnnotation = (AnnotationFS) containingTokens.next();
String tag = tokenAnnotation.getFeatureValueAsString(mPOSFeature);
tokens.add(tokenAnnotation.getCoveredText().trim());
tags.add(tag);
}
mPOSSamples.add(new POSSample(tokens, tags));
}
Code example origin: org.apache.opennlp/opennlp-uima
ContainingConstraint containingConstraint = new ContainingConstraint(containerAnnotation);
Iterator<AnnotationFS> containingTokens = cas.createFilteredIterator(
allRemoveAnnotations.iterator(), containingConstraint);
Code example origin: Ailab403/ailab-mltk4j
private void processChunk(CAS tcas, AnnotationFS chunk) {
String chunkTag = chunk.getFeatureValueAsString(mChunkTagFeature);
FSIndex<AnnotationFS> tokenIndex = tcas.getAnnotationIndex(mTokenType);
ContainingConstraint containingConstraint =
new ContainingConstraint(chunk);
Iterator<AnnotationFS> tokenIterator = tcas.createFilteredIterator(tokenIndex.iterator(),
containingConstraint);
List<String> tokens = new ArrayList<String>();
List<String> tags = new ArrayList<String>();
List<String> chunkTags = new ArrayList<String>();
while (tokenIterator.hasNext()) {
AnnotationFS tokenAnnotation = tokenIterator.next();
tokens.add(tokenAnnotation.getCoveredText().trim());
tags.add(tokenAnnotation.getFeatureValueAsString(mPOSFeature));
chunkTags.add(chunkTag);
}
mChunkSamples.add(new ChunkSample(tokens, tags, chunkTags));
}
Code example origin: Ailab403/ailab-mltk4j
private void processSentence(CAS tcas, AnnotationFS sentence) {
FSIndex<AnnotationFS> chunkIndex = tcas.getAnnotationIndex(mChunkType);
ContainingConstraint containingConstraint =
new ContainingConstraint(sentence);
Iterator<AnnotationFS> chunkIterator = tcas.createFilteredIterator(
chunkIndex.iterator(), containingConstraint);
while (chunkIterator.hasNext()) {
AnnotationFS chunkAnnotation = chunkIterator.next();
processChunk(tcas, chunkAnnotation);
}
}
Code example origin: org.apache.uima/textmarker-core
public FSIterator<AnnotationFS> getFilteredBasicIterator(FSMatchConstraint constraint) {
ConstraintFactory cf = cas.getConstraintFactory();
FSMatchConstraint matchConstraint = cf.and(constraint, filter.getDefaultConstraint());
return cas.createFilteredIterator(basicIt, matchConstraint);
}
Code example origin: org.apache.uima/ruta-core
public FSIterator<AnnotationFS> createFilteredIterator(CAS cas, Type basicType) {
if (windowAnnotation != null) {
FSIterator<AnnotationFS> windowIt = cas.getAnnotationIndex(basicType)
.subiterator(windowAnnotation);
FSIterator<AnnotationFS> iterator = cas.createFilteredIterator(windowIt,
createCurrentConstraint(false));
return iterator;
} else {
FSIterator<AnnotationFS> iterator = cas.createFilteredIterator(
cas.getAnnotationIndex(basicType).iterator(), createCurrentConstraint(false));
return iterator;
}
}
Code example origin: org.apache.uima/ruta-core
public FSIterator<AnnotationFS> getFilteredBasicIterator(FSMatchConstraint constraint) {
ConstraintFactory cf = cas.getConstraintFactory();
FSMatchConstraint matchConstraint = cf.and(constraint, filter.getDefaultConstraint());
return cas.createFilteredIterator(basicIt, matchConstraint);
}
Code example origin: org.apache.uima/textmarker-core
public FSIterator<AnnotationFS> createFilteredIterator(CAS cas, Type basicType) {
if (windowAnnotation != null) {
FSIterator<AnnotationFS> windowIt = cas.getAnnotationIndex(basicType).subiterator(
windowAnnotation);
FSIterator<AnnotationFS> iterator = cas.createFilteredIterator(windowIt,
createCurrentConstraint(false));
return iterator;
} else {
FSIterator<AnnotationFS> iterator = cas.createFilteredIterator(
cas.getAnnotationIndex(basicType).iterator(), createCurrentConstraint(false));
return iterator;
}
}
Code example origin: org.apache.uima/ruta-core
public List<RutaBasic> getBasicsInWindow(AnnotationFS windowAnnotation) {
List<RutaBasic> result = new ArrayList<RutaBasic>();
if (windowAnnotation instanceof RutaBasic) {
result.add((RutaBasic) windowAnnotation);
return result;
}
FSMatchConstraint defaultConstraint = filter.getDefaultConstraint();
FSIterator<AnnotationFS> iterator = cas.createFilteredIterator(
cas.getAnnotationIndex(basicType).subiterator(windowAnnotation), defaultConstraint);
while (iterator.isValid()) {
result.add((RutaBasic) iterator.get());
iterator.moveToNext();
}
return result;
}
Code example origin: Ailab403/ailab-mltk4j
private void process(CAS tcas, AnnotationFS sentence) {
FSIndex<AnnotationFS> allTokens = tcas.getAnnotationIndex(mTokenType);
ContainingConstraint containingConstraint =
new ContainingConstraint(sentence);
Iterator<AnnotationFS> containingTokens = tcas.createFilteredIterator(
allTokens.iterator(), containingConstraint);
List<Span> openNLPSpans = new LinkedList<Span>();
while (containingTokens.hasNext()) {
AnnotationFS tokenAnnotation = containingTokens.next();
openNLPSpans.add(new Span(tokenAnnotation.getBegin()
- sentence.getBegin(), tokenAnnotation.getEnd()
- sentence.getBegin()));
}
Span[] spans = openNLPSpans.toArray(new Span[openNLPSpans.size()]);
Arrays.sort(spans);
tokenSamples.add(new TokenSample(sentence.getCoveredText(), spans));
}
Code example origin: org.apache.uima/textmarker-core
public List<TextMarkerBasic> getBasicsInWindow(AnnotationFS windowAnnotation) {
List<TextMarkerBasic> result = new ArrayList<TextMarkerBasic>();
if (windowAnnotation instanceof TextMarkerBasic) {
result.add((TextMarkerBasic) windowAnnotation);
return result;
}
FSMatchConstraint defaultConstraint = filter.getDefaultConstraint();
FSIterator<AnnotationFS> iterator = cas.createFilteredIterator(cas
.getAnnotationIndex(basicType).subiterator(windowAnnotation), defaultConstraint);
while (iterator.isValid()) {
result.add((TextMarkerBasic) iterator.get());
iterator.moveToNext();
}
return result;
}
Code example origin: org.apache.uima/uimaj-ep-cas-editor
@Override
public Object[] getElements(Object inputElement) {
if (mCurrentType == null) {
return new Object[] {};
}
StrictTypeConstraint typeConstrain = new StrictTypeConstraint(mCurrentType);
FSIterator<FeatureStructure> strictTypeIterator =mDocument.getCAS().createFilteredIterator(
mDocument.getCAS().getIndexRepository().getAllIndexedFS(mCurrentType), typeConstrain);
LinkedList<ModelFeatureStructure> featureStrucutreList = new LinkedList<ModelFeatureStructure>();
while (strictTypeIterator.hasNext()) {
featureStrucutreList.add(new ModelFeatureStructure(mDocument,
strictTypeIterator.next()));
}
ModelFeatureStructure[] featureStructureArray = new ModelFeatureStructure[featureStrucutreList
.size()];
featureStrucutreList.toArray(featureStructureArray);
return featureStructureArray;
}
Code example origin: org.apache.uima/uimaj-ep-cas-editor
/**
* Retrieves annotations of the given type from the {@link CAS}.
*
* @param type the type
* @return the annotations
*/
@Override
public Collection<AnnotationFS> getAnnotations(Type type) {
FSIndex<AnnotationFS> annotationIndex = mCAS.getAnnotationIndex(type);
StrictTypeConstraint typeConstrain = new StrictTypeConstraint(type);
FSIterator<AnnotationFS> strictTypeIterator = mCAS
.createFilteredIterator(annotationIndex.iterator(), typeConstrain);
return fsIteratorToCollection(strictTypeIterator);
}
Code example origin: org.apache.opennlp/opennlp-uima
protected void process(CAS cas, AnnotationFS sentenceAnnotation) {
FSIndex<AnnotationFS> allTokens = cas.getAnnotationIndex(mTokenType);
ContainingConstraint containingConstraint =
new ContainingConstraint(sentenceAnnotation);
String sentence = sentenceAnnotation.getCoveredText();
Iterator<AnnotationFS> containingTokens = cas.createFilteredIterator(
allTokens.iterator(), containingConstraint);
List<Span> tokenSpans = new LinkedList<>();
while (containingTokens.hasNext()) {
AnnotationFS token = containingTokens.next();
tokenSpans.add(new Span(token.getBegin() - sentenceAnnotation.getBegin(),
token.getEnd() - sentenceAnnotation.getBegin()));
}
ParseConverter converter = new ParseConverter(sentence, tokenSpans.toArray(new Span[tokenSpans.size()]));
Parse unparsedTree = converter.getParseForTagger();
if (unparsedTree.getChildCount() > 0) {
Parse parse = mParser.parse(unparsedTree);
// TODO: We need a strategy to handle the case that a full
// parse could not be found. What to do in this case?
parse = converter.transformParseFromTagger(parse);
if (mLogger.isLoggable(Level.INFO)) {
StringBuffer parseString = new StringBuffer();
parse.show(parseString);
mLogger.log(Level.INFO, parseString.toString());
}
createAnnotation(cas, sentenceAnnotation.getBegin(), parse);
}
}
Code example origin: Ailab403/ailab-mltk4j
Iterator<AnnotationFS> containingTokens = cas.createFilteredIterator(tokenAnnotations
.iterator(), sentenceContainingConstraint);
Iterator<AnnotationFS> containingNames = cas.createFilteredIterator(allNames.iterator(),
sentenceContainingConstraint);
Code example origin: org.apache.uima/uimaj-ep-cas-editor
cas.createFilteredIterator(allAnnotations.iterator(),
annotationInSpanAndStrictTypeConstraint);