This article collects Java code examples for the org.apache.hadoop.mapreduce.RecordReader.close method and shows how RecordReader.close is used in practice. The examples were extracted from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as solid references. Details of the RecordReader.close method:
Package path: org.apache.hadoop.mapreduce.RecordReader
Class name: RecordReader
Method name: close
Description: Close the record reader.
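For context before the project examples, here is a minimal sketch of where close() fits in the standard RecordReader lifecycle. It assumes TextInputFormat from hadoop-mapreduce-client-core; the readSplit method is hypothetical, and the split and context would normally be supplied by the MapReduce framework:

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class RecordReaderLifecycle {
  // Hypothetical driver: in a real job the framework hands you split and context.
  public static void readSplit(InputSplit split, TaskAttemptContext context)
      throws IOException, InterruptedException {
    TextInputFormat inputFormat = new TextInputFormat();
    RecordReader<LongWritable, Text> reader = inputFormat.createRecordReader(split, context);
    try {
      reader.initialize(split, context);
      while (reader.nextKeyValue()) {
        // consume reader.getCurrentKey() / reader.getCurrentValue() here
      }
    } finally {
      reader.close(); // release the underlying stream even if reading failed
    }
  }
}

Since RecordReader implements java.io.Closeable, a try-with-resources statement is an equivalent alternative to the explicit finally block. Most of the examples below simply propagate close() through a wrapper class.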
Code example from: apache/flink
@Override
public void close() throws IOException {
  if (this.recordReader != null) {
    // enforce sequential close() calls
    synchronized (CLOSE_MUTEX) {
      this.recordReader.close();
    }
  }
}
Code example from: apache/flink
@Override
public void close() throws IOException {
  this.recordReader.close();
}
Code example from: apache/hive
@Override
public void close() throws IOException {
  if (realReader != null) {
    realReader.close();
  }
}
Code example from: apache/incubator-gobblin
@Override
public void close() throws IOException {
  this.recordReader.close();
}
Code example from: apache/drill
@Override
public void close() throws IOException {
  if (realReader != null) {
    realReader.close();
  }
}
Code example from: thinkaurelius/titan
@Override
public void close() throws IOException {
  try {
    // release the deserializer before closing the wrapped reader
    deser = null;
    countedDeser.release();
  } catch (Exception e) {
    throw new IOException(e);
  }
  reader.close();
}
Code example from: apache/hive
@Override
public void close() throws IOException {
  // tear down the reader, table, and connection under the table monitor
  synchronized (HBASE_TABLE_MONITOR) {
    recordReader.close();
    closeTable();
    conn.close();
  }
}
Code example from: apache/hive
@Override
public boolean hasNext() {
  try {
    boolean retVal = curRecReader.nextKeyValue();
    if (retVal) {
      return true;
    }
    // if it returns false, we need to close the recordReader
    curRecReader.close();
    return false;
  } catch (IOException e) {
    throw new RuntimeException(e);
  } catch (InterruptedException e) {
    throw new RuntimeException(e);
  }
}
Code example from: apache/avro
/**
 * Verifies that a non-null record reader can be created, and the key/value types are
 * as expected.
 */
@Test
public void testCreateRecordReader() throws IOException, InterruptedException {
  // Set up the job configuration.
  Job job = new Job();
  AvroJob.setInputKeySchema(job, Schema.create(Schema.Type.STRING));
  Configuration conf = job.getConfiguration();

  FileSplit inputSplit = createMock(FileSplit.class);
  TaskAttemptContext context = createMock(TaskAttemptContext.class);
  expect(context.getConfiguration()).andReturn(conf).anyTimes();
  replay(inputSplit);
  replay(context);

  AvroKeyInputFormat inputFormat = new AvroKeyInputFormat();
  @SuppressWarnings("unchecked")
  RecordReader<AvroKey<Object>, NullWritable> recordReader =
      inputFormat.createRecordReader(inputSplit, context);
  assertNotNull(inputFormat);
  recordReader.close();

  verify(inputSplit);
  verify(context);
}
Code example from: apache/tinkerpop
@Override
public void close() throws IOException {
  this.recordReader.close();
}
Code example from: apache/tinkerpop
@Override
public void close() {
  try {
    // close every underlying reader
    for (final RecordReader reader : this.readers) {
      reader.close();
    }
  } catch (final IOException e) {
    throw new IllegalStateException(e.getMessage(), e);
  }
}
Code example from: apache/avro
recordReader.close();
Code example from: apache/avro
recordReader.close();
Code example from: apache/hbase
rr.close();
@Override
public boolean hasNext() {
try {
while (true) {
if (this.edgeIterator.hasNext())
return true;
if (this.readers.isEmpty())
return false;
if (this.readers.peek().nextKeyValue()) {
this.edgeIterator = this.readers.peek().getCurrentValue().get().edges(Direction.OUT);
} else {
this.readers.remove().close();
}
}
} catch (final Exception e) {
throw new IllegalStateException(e.getMessage(), e);
}
}
}
Code example from: apache/tinkerpop
@Override
public boolean hasNext() {
  try {
    if (null != this.nextVertex) return true;
    else {
      while (!this.readers.isEmpty()) {
        if (this.readers.peek().nextKeyValue()) {
          this.nextVertex = new HadoopVertex(this.readers.peek().getCurrentValue().get(), this.graph);
          return true;
        } else
          this.readers.remove().close();
      }
    }
  } catch (final Exception e) {
    throw new IllegalStateException(e.getMessage(), e);
  }
  return false;
}
Code example from: apache/tinkerpop
@Override
public Edge next() {
  try {
    while (true) {
      if (this.edgeIterator.hasNext())
        return new HadoopEdge(this.edgeIterator.next(), this.graph);
      if (this.readers.isEmpty())
        throw FastNoSuchElementException.instance();
      if (this.readers.peek().nextKeyValue()) {
        this.edgeIterator = this.readers.peek().getCurrentValue().get().edges(Direction.OUT);
      } else {
        this.readers.remove().close();
      }
    }
  } catch (final Exception e) {
    throw new IllegalStateException(e.getMessage(), e);
  }
}
Code example from: apache/tinkerpop
@Override
public Vertex next() {
  try {
    if (this.nextVertex != null) {
      final Vertex temp = this.nextVertex;
      this.nextVertex = null;
      return temp;
    } else {
      while (!this.readers.isEmpty()) {
        if (this.readers.peek().nextKeyValue())
          return new HadoopVertex(this.readers.peek().getCurrentValue().get(), this.graph);
        else
          this.readers.remove().close();
      }
    }
    throw FastNoSuchElementException.instance();
  } catch (final Exception e) {
    throw new IllegalStateException(e.getMessage(), e);
  }
}
Code example from: larsgeorge/hbase-book
reader.close();
Code example from: kite-sdk/kite
@Override
public void close() throws IOException {
  unfiltered.close();
}