Usage of the edu.stanford.nlp.pipeline.Annotation.<init>() method, with code examples


This article collects a number of code examples for the Java method edu.stanford.nlp.pipeline.Annotation.<init>() and shows how Annotation.<init>() is used in practice. The examples were extracted from selected open-source projects on platforms such as GitHub, Stack Overflow and Maven, so they make a useful reference. Details of the Annotation.<init>() method:
Package path: edu.stanford.nlp.pipeline.Annotation
Class name: Annotation
Method name: <init>

About Annotation.<init>

Copy constructor.

(This Javadoc describes the copy constructor Annotation(Annotation); most of the examples below use the Annotation(String) constructor instead, which wraps the raw document text that a pipeline will annotate.)
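The snippets below all follow the same basic pattern: construct an Annotation (usually from raw text), run it through a pipeline, and then read the computed values back out of the Annotation, which is a CoreMap keyed by CoreAnnotations classes. As a quick orientation, here is a minimal, self-contained sketch of that pattern; the annotator list and the sample sentence are illustrative choices, not taken from the examples in this article:

import java.util.List;
import java.util.Properties;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;

public class AnnotationInitExample {
 public static void main(String[] args) {
  // Annotator list chosen for illustration; any valid configuration works the same way.
  Properties props = new Properties();
  props.setProperty("annotators", "tokenize,ssplit,pos,lemma");
  StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

  // Annotation(String) wraps the raw text that the pipeline will process.
  Annotation document = new Annotation("Stanford CoreNLP is a Java NLP toolkit. It annotates raw text.");
  pipeline.annotate(document);

  // Results are written back into the Annotation, keyed by CoreAnnotations classes.
  List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
  for (CoreMap sentence : sentences) {
   for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
    System.out.println(token.word() + "\t" + token.tag());
   }
  }
 }
}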

Code examples

Code example from: stanfordnlp/CoreNLP

/** Copies the map, but not a deep copy.
 *  @return The copy
 */
public Annotation copy() {
 return new Annotation(this);
}

Code example from: stanfordnlp/CoreNLP

@Override
public Annotation createFromText(String text) throws IOException {
 return new Annotation(text);
}

Code example from: stanfordnlp/CoreNLP

public CoreDocument(String documentText) {
 this.annotationDocument = new Annotation(documentText);
}

Code example from: stanfordnlp/CoreNLP

/**
 * Given a list of sentences (as CoreMaps), wrap it in a new Annotation.
 */
public static Annotation createDataset(List<CoreMap> sentences) {
 Annotation dataset = new Annotation("");
 addSentences(dataset,sentences);
 return dataset;
}

Code example from: stanfordnlp/CoreNLP

/**
 * Runs the entire pipeline on the content of the given text passed in.
 * @param text The text to process
 * @return An Annotation object containing the output of all annotators
 */
public Annotation process(String text) {
 Annotation annotation = new Annotation(text);
 annotate(annotation);
 return annotation;
}
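As the method body shows, process(String) is just a convenience wrapper: it constructs the Annotation from the text and calls annotate() on it in one step. Assuming a StanfordCoreNLP pipeline configured as in the sketch near the top of this article, the create-then-annotate pattern can therefore be shortened to (illustrative):

Annotation document = pipeline.process("Stanford CoreNLP is a Java NLP toolkit.");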

Code example from: stanfordnlp/CoreNLP

public static void main(String[] args) {

 Properties props = StringUtils.propFileToProperties("projects/core/src/edu/stanford/nlp/classify/mood.prop");
 StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

 Annotation happyAnnotation = new Annotation("I am so glad this is awesome");
 pipeline.annotate(happyAnnotation);
 Annotation sadAnnotation = new Annotation("I am so gloomy and depressed");
 pipeline.annotate(sadAnnotation);
 Annotation bothAnnotation = new Annotation("I am so gloomy gloomy gloomy gloomy glad");
 pipeline.annotate(bothAnnotation);
}

Code example from: stanfordnlp/CoreNLP

@Override
public Annotation read(String path) throws IOException {
 Annotation doc = new Annotation("");
 logger.info("Reading file: " + path);
 // Each iteration through this loop processes a single sentence along with any relations in it
 for (Iterator<String> lineIterator = IOUtils.readLines(path).iterator(); lineIterator.hasNext(); ) {
  Annotation sentence = readSentence(path, lineIterator);
  AnnotationUtils.addSentence(doc, sentence);
 }
 return doc;
}

Code example from: stanfordnlp/CoreNLP

public static Annotation textToAnnotation(AnnotationPipeline pipeline, String text, String date) {
 Annotation annotation = new Annotation(text);
 annotation.set(CoreAnnotations.DocDateAnnotation.class, date);
 pipeline.annotate(annotation);
 return annotation;
}
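The example above illustrates a second common pattern: set document-level metadata on the Annotation before running the pipeline. CoreAnnotations.DocDateAnnotation in particular is what temporal annotators such as SUTime (run as part of the ner annotator) consult when normalizing relative date expressions. A minimal sketch, assuming the imports and an ner-capable pipeline as in the sketch near the top of this article; the sentence and date are illustrative:

Annotation annotation = new Annotation("The package arrived yesterday.");
// The document date anchors relative time expressions such as "yesterday".
annotation.set(CoreAnnotations.DocDateAnnotation.class, "2022-01-16");
pipeline.annotate(annotation);
// Date tokens should then carry a normalized value (e.g. 2022-01-15) under
// CoreAnnotations.NormalizedNamedEntityTagAnnotation.class.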

Code example from: stanfordnlp/CoreNLP

private static CoreMap wordsToSentence(List<String> sentWords) {
 String sentText = StringUtils.join(sentWords, " ");
 Annotation sentence = new Annotation(sentText);
 List<CoreLabel> tokens = new ArrayList<>(sentWords.size());
 for (String text:sentWords) {
  CoreLabel token = tokenFactory.makeToken();
  token.set(CoreAnnotations.TextAnnotation.class, text);
  tokens.add(token);
 }
 sentence.set(CoreAnnotations.TokensAnnotation.class, tokens);
 return sentence;
}

Code example from: Vedenin/useful-java-links

private static String[] testStanfordCoreNLP(String text) throws Exception {
  StanfordCoreNLP coreNLP = getStanfordCoreNLP();
  Annotation document = new Annotation(text);
  coreNLP.annotate(document);
  List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
  String[] result = new String[sentences.size()];
  int i = 0;
  for (CoreMap sentence : sentences) {
    result[i] = sentence.toString();
    i++;
  }
  return result;
}

Code example from: stanfordnlp/CoreNLP

protected Tree parse(List<CoreLabel> tokens,
           List<ParserConstraint> constraints) {
 CoreMap sent = new Annotation("");
 sent.set(CoreAnnotations.TokensAnnotation.class, tokens);
 sent.set(ParserAnnotations.ConstraintAnnotation.class, constraints);
 Annotation doc = new Annotation("");
 List<CoreMap> sents = new ArrayList<>();
 sents.add(sent);
 doc.set(CoreAnnotations.SentencesAnnotation.class, sents);
 getParser().annotate(doc);
 sents = doc.get(CoreAnnotations.SentencesAnnotation.class);
 return sents.get(0).get(TreeCoreAnnotations.TreeAnnotation.class);
}

Code example from: stanfordnlp/CoreNLP

private Tree parse(List<CoreLabel> tokens,
          List<ParserConstraint> constraints) {
 CoreMap sent = new Annotation("");
 sent.set(CoreAnnotations.TokensAnnotation.class, tokens);
 sent.set(ParserAnnotations.ConstraintAnnotation.class, constraints);
 Annotation doc = new Annotation("");
 List<CoreMap> sents = new ArrayList<>(1);
 sents.add(sent);
 doc.set(CoreAnnotations.SentencesAnnotation.class, sents);
 getParser().annotate(doc);
 sents = doc.get(CoreAnnotations.SentencesAnnotation.class);
 return sents.get(0).get(TreeCoreAnnotations.TreeAnnotation.class);
}

Code example from: stanfordnlp/CoreNLP

public Annotation process(String sentence, String dateString, Annotator timeAnnotator) {
 log.info("Processing text \"" + sentence + "\" with dateString = " + dateString);
 Annotation anno = new Annotation(sentence);
 if (dateString != null && ! dateString.isEmpty()) {
  anno.set(CoreAnnotations.DocDateAnnotation.class, dateString);
 }
 pipeline.annotate(anno);
 timeAnnotator.annotate(anno);
 return anno;
}

Code example from: stanfordnlp/CoreNLP

/**
 * A debugging method to try entity linking sentences from the console.
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
 Properties props = StringUtils.argsToProperties(args);
 props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,entitymentions,entitylink");
 StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
 IOUtils.console("sentence> ", line -> {
  Annotation ann = new Annotation(line);
  pipeline.annotate(ann);
  List<CoreLabel> tokens = ann.get(CoreAnnotations.SentencesAnnotation.class).get(0).get(CoreAnnotations.TokensAnnotation.class);
  System.err.println(StringUtils.join(tokens.stream().map(x -> x.get(CoreAnnotations.WikipediaEntityAnnotation.class)), "  "));
 });
}

Code example from: stanfordnlp/CoreNLP

public static void main(String[] args) throws IOException {
 StanfordCoreNLP pipeline = new StanfordCoreNLP(PropertiesUtils.asProperties("annotators", "tokenize,ssplit,pos,lemma"));
 QuestionToStatementTranslator translator = new QuestionToStatementTranslator();
 IOUtils.console("question> ", question -> {
  Annotation ann = new Annotation(question);
  pipeline.annotate(ann);
  List<CoreLabel> tokens = ann.get(CoreAnnotations.TokensAnnotation.class);
  List<List<CoreLabel>> statements = translator.toStatement(tokens);
  for (List<CoreLabel> statement : statements) {
   System.out.println("  -> " + StringUtils.join(statement.stream().map(CoreLabel::word), " "));
  }
 });
}

Code example from: stanfordnlp/CoreNLP

/**
 * @see SimpleSentiment#classify(CoreMap)
 */
public SentimentClass classify(String text) {
 Annotation ann = new Annotation(text);
 pipeline.get().annotate(ann);
 CoreMap sentence = ann.get(CoreAnnotations.SentencesAnnotation.class).get(0);
 Counter<String> features = featurize(sentence);
 RVFDatum<SentimentClass, String> datum = new RVFDatum<>(features);
 return impl.classOf(datum);
}

Code example from: stanfordnlp/CoreNLP

/**
 * TODO(gabor) JavaDoc
 *
 * @param sentence
 * @param pipeline
 */
public static void annotate(CoreMap sentence, AnnotationPipeline pipeline) {
 Annotation ann = new Annotation(StringUtils.join(sentence.get(CoreAnnotations.TokensAnnotation.class), " "));
 ann.set(CoreAnnotations.TokensAnnotation.class, sentence.get(CoreAnnotations.TokensAnnotation.class));
 ann.set(CoreAnnotations.SentencesAnnotation.class, Collections.singletonList(sentence));
 pipeline.annotate(ann);
}

Code example from: stanfordnlp/CoreNLP

private static Annotation testAnnoation(String text,String[] args){
 Annotation document = new Annotation(text);
 Properties props = StringUtils.argsToProperties(args);
 StanfordCoreNLP corenlp = new StanfordCoreNLP(props);
 corenlp.annotate(document);
 HybridCorefAnnotator hcoref = new HybridCorefAnnotator(props);
 hcoref.annotate(document);
 return document;
}
