This article collects code examples for the Java method org.antlr.v4.tool.Grammar.getVocabulary() and shows how Grammar.getVocabulary() is used in practice. The examples were extracted from selected open-source projects found via GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of Grammar.getVocabulary():
Package: org.antlr.v4.tool
Class: Grammar
Method: getVocabulary
Gets a Vocabulary instance describing the vocabulary used by the grammar.
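Before the collected examples, here is a minimal, self-contained sketch of the call itself. It assumes the ANTLR 4 tool jar (org.antlr:antlr4, which contains org.antlr.v4.tool.Grammar) is on the classpath; the Demo grammar and class name are made up purely for illustration.

import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.tool.Grammar;

public class VocabularyDemo {
    public static void main(String[] args) throws Exception {
        // Made-up in-memory grammar, only to have something to query.
        Grammar g = new Grammar(
            "grammar Demo;\n" +
            "prog : ID '=' INT ';' ;\n" +
            "ID   : [a-zA-Z]+ ;\n" +
            "INT  : [0-9]+ ;\n" +
            "WS   : [ \\t\\r\\n]+ -> skip ;\n");

        Vocabulary vocab = g.getVocabulary();
        // Print every token type the grammar defines, using its display name.
        for (int type = 0; type <= g.getMaxTokenType(); type++) {
            System.out.println(type + " -> " + vocab.getDisplayName(type));
        }
    }
}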
Code example origin: org.antlr/antlr4 (the same constructor also appears, verbatim, in com.tunnelvisionlabs/antlr4, uk.co.nichesolutions/antlr4, and io.virtdata/virtdata-lib-realer)

public GrammarParserInterpreter(Grammar g, ATN atn, TokenStream input) {
    super(g.fileName, g.getVocabulary(),
          Arrays.asList(g.getRuleNames()),
          atn, // must run ATN through serializer to set some state flags
          input);
    this.g = g;
    decisionStatesThatSetOuterAltNumInContext = findOuterMostDecisionStates();
    stateToAltsMap = new int[g.atn.states.size()][];
}
Code example origin: com.tunnelvisionlabs/antlr4

public ParserInterpreter createParserInterpreter(TokenStream tokenStream) {
    if (this.isLexer()) {
        throw new IllegalStateException("A parser interpreter can only be created for a parser or combined grammar.");
    }

    char[] serializedAtn = ATNSerializer.getSerializedAsChars(atn, Arrays.asList(getRuleNames()));
    ATN deserialized = new ATNDeserializer().deserialize(serializedAtn);
    return new ParserInterpreter(fileName, getVocabulary(), Arrays.asList(getRuleNames()), deserialized, tokenStream);
}
Code example origin: org.antlr/antlr4 (also present, verbatim, in io.virtdata/virtdata-lib-realer)

public LexerInterpreter createLexerInterpreter(CharStream input) {
    if (this.isParser()) {
        throw new IllegalStateException("A lexer interpreter can only be created for a lexer or combined grammar.");
    }

    if (this.isCombined()) {
        return implicitLexer.createLexerInterpreter(input);
    }

    char[] serializedAtn = ATNSerializer.getSerializedAsChars(atn);
    ATN deserialized = new ATNDeserializer().deserialize(serializedAtn);
    List<String> allChannels = new ArrayList<String>();
    allChannels.add("DEFAULT_TOKEN_CHANNEL");
    allChannels.add("HIDDEN");
    allChannels.addAll(channelValueToNameList);
    return new LexerInterpreter(fileName, getVocabulary(), Arrays.asList(getRuleNames()), allChannels, ((LexerGrammar)this).modes.keySet(), deserialized, input);
}
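As a hedged usage sketch of the factory above: once a Grammar has been built in memory, createLexerInterpreter() plus getVocabulary() is enough to tokenize some input and print readable token names. The grammar text, input string, and class name are invented for illustration, and CharStreams.fromString assumes a reasonably recent ANTLR 4 runtime.

import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.LexerInterpreter;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.tool.Grammar;

public class TokenizeDemo {
    public static void main(String[] args) throws Exception {
        Grammar g = new Grammar(
            "grammar Demo;\n" +
            "prog : ID '=' INT ';' ;\n" +
            "ID   : [a-zA-Z]+ ;\n" +
            "INT  : [0-9]+ ;\n" +
            "WS   : [ \\t\\r\\n]+ -> skip ;\n");

        LexerInterpreter lexer = g.createLexerInterpreter(CharStreams.fromString("x = 42;"));
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        tokens.fill();
        for (Token t : tokens.getTokens()) {
            // getVocabulary() turns the numeric token type back into a name.
            System.out.println(g.getVocabulary().getDisplayName(t.getType()) + " : '" + t.getText() + "'");
        }
    }
}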
Code example origin: com.tunnelvisionlabs/antlr4 (a single-line fragment; the same line also appears in org.antlr/antlr4, uk.co.nichesolutions/antlr4, com.impetus.fabric/fabric-jdbc-driver-shaded, and io.virtdata/virtdata-lib-realer)

buf.append("-").append(not ? "~" : "").append(st.label().toString(g.getVocabulary())).append("->").append(getStateString(t.target)).append('\n');
Code example origin: org.antlr/antlr4 (also present, verbatim, in io.virtdata/virtdata-lib-realer, uk.co.nichesolutions/antlr4, and com.impetus.fabric/fabric-jdbc-driver-shaded)

public ParserInterpreter createParserInterpreter(TokenStream tokenStream) {
    if (this.isLexer()) {
        throw new IllegalStateException("A parser interpreter can only be created for a parser or combined grammar.");
    }

    char[] serializedAtn = ATNSerializer.getSerializedAsChars(atn);
    ATN deserialized = new ATNDeserializer().deserialize(serializedAtn);
    return new ParserInterpreter(fileName, getVocabulary(), Arrays.asList(getRuleNames()), deserialized, tokenStream);
}
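A hedged sketch of driving the interpreter returned above: getVocabulary() is handed to the ParserInterpreter so that errors and parse trees can show token names. The grammar text, input, start-rule name, and class name are invented; parse() takes the index of the rule to start from.

import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.LexerInterpreter;
import org.antlr.v4.runtime.ParserInterpreter;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.tool.Grammar;

public class ParseDemo {
    public static void main(String[] args) throws Exception {
        Grammar g = new Grammar(
            "grammar Demo;\n" +
            "prog : ID '=' INT ';' ;\n" +
            "ID   : [a-zA-Z]+ ;\n" +
            "INT  : [0-9]+ ;\n" +
            "WS   : [ \\t\\r\\n]+ -> skip ;\n");

        LexerInterpreter lexer = g.createLexerInterpreter(CharStreams.fromString("x = 42;"));
        ParserInterpreter parser = g.createParserInterpreter(new CommonTokenStream(lexer));
        // Start parsing at the rule named "prog"; rule indexes come from the Grammar.
        ParserRuleContext tree = parser.parse(g.getRule("prog").index);
        System.out.println(tree.toStringTree(parser));
    }
}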
Code example origin: com.tunnelvisionlabs/antlr4

public LexerInterpreter createLexerInterpreter(CharStream input) {
    if (this.isParser()) {
        throw new IllegalStateException("A lexer interpreter can only be created for a lexer or combined grammar.");
    }

    if (this.isCombined()) {
        return implicitLexer.createLexerInterpreter(input);
    }

    char[] serializedAtn = ATNSerializer.getSerializedAsChars(atn, Arrays.asList(getRuleNames()));
    ATN deserialized = new ATNDeserializer().deserialize(serializedAtn);
    List<String> allChannels = new ArrayList<String>();
    allChannels.add("DEFAULT_TOKEN_CHANNEL");
    allChannels.add("HIDDEN");
    allChannels.addAll(channelValueToNameList);
    return new LexerInterpreter(fileName, getVocabulary(), Arrays.asList(getRuleNames()), allChannels, ((LexerGrammar)this).modes.keySet(), deserialized, input);
}
Code example origin: com.impetus.fabric/fabric-jdbc-driver-shaded (also present, verbatim, in uk.co.nichesolutions/antlr4)

public LexerInterpreter createLexerInterpreter(CharStream input) {
    if (this.isParser()) {
        throw new IllegalStateException("A lexer interpreter can only be created for a lexer or combined grammar.");
    }

    if (this.isCombined()) {
        return implicitLexer.createLexerInterpreter(input);
    }

    char[] serializedAtn = ATNSerializer.getSerializedAsChars(atn);
    ATN deserialized = new ATNDeserializer().deserialize(serializedAtn);
    return new LexerInterpreter(fileName, getVocabulary(), Arrays.asList(getRuleNames()), ((LexerGrammar)this).modes.keySet(), deserialized, input);
}
Code example origin: uk.co.nichesolutions/antlr4

// Fragment 1: render the label of a set transition
String label = set.label().toString();
if ( isLexer ) label = set.label().toString(true);
else if ( grammar!=null ) label = set.label().toString(grammar.getVocabulary());
if ( edge instanceof NotSetTransition ) label = "~"+label;
edgeST.add("label", getEdgeLabel(label));

// Fragment 2: render the label of a range transition
String label = range.label().toString();
if ( isLexer ) label = range.toString();
else if ( grammar!=null ) label = range.label().toString(grammar.getVocabulary());
edgeST.add("label", getEdgeLabel(label));
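Both fragments above turn an IntervalSet of token types into readable text by passing it the grammar's vocabulary. A minimal hedged sketch of that conversion in isolation (the grammar, token names, and class name are invented; the printed forms in the comments are indicative):

import org.antlr.v4.runtime.misc.IntervalSet;
import org.antlr.v4.tool.Grammar;

public class LabelDemo {
    public static void main(String[] args) throws Exception {
        Grammar g = new Grammar(
            "grammar Demo;\n" +
            "prog : ID | INT ;\n" +
            "ID  : [a-zA-Z]+ ;\n" +
            "INT : [0-9]+ ;\n");

        IntervalSet label = new IntervalSet();
        label.add(g.getTokenType("ID"));
        label.add(g.getTokenType("INT"));

        // Without a vocabulary the set prints raw token type numbers;
        // with g.getVocabulary() it prints the token names instead.
        System.out.println(label.toString());                  // e.g. {1, 2}
        System.out.println(label.toString(g.getVocabulary())); // e.g. {ID, INT}
    }
}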