I am trying to read data from one HBase table, do a little processing on it, and store it in another table, using the following code:
package analysis;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

public class Author_ref {

    public static class MyMapper extends TableMapper<Text, Text> {
        @Override
        public void map(ImmutableBytesWritable row, Result value, Context context)
                throws IOException, InterruptedException {
            String key = Bytes.toString(row.get(), row.getOffset(), row.getLength());
            String values = Bytes.toString(value.getValue(Bytes.toBytes("authors"), Bytes.toBytes("authors")));
            // replaceAll() takes a regex, and "[" / "]" on their own are invalid
            // patterns (PatternSyntaxException); use the literal replace() instead.
            String cleanValues = values.replace("[", "").replace("]", "");
            String[] authors = cleanValues.trim().split(",");
            for (String author : authors) {
                context.write(new Text(author.trim()), new Text(key));
            }
        }
    }

    public static class MyReducer extends TableReducer<Text, Text, ImmutableBytesWritable> {
        @Override
        public void reduce(Text author, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            StringBuilder papers = new StringBuilder();
            for (Text x : values) {
                if (papers.length() > 0) {
                    papers.append(",");
                }
                papers.append(x.toString());
            }
            // Text.getBytes() returns the backing array, which may be longer than
            // the valid data; copyBytes() returns exactly the valid bytes.
            Put p = new Put(author.copyBytes());
            p.addColumn(Bytes.toBytes("papers_writen"), Bytes.toBytes("papers_writen"),
                    Bytes.toBytes(papers.toString()));
            context.write(null, p);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration config = HBaseConfiguration.create();
        Job job = Job.getInstance(config, "ExampleSummary");
        job.setJarByClass(Author_ref.class); // class that contains mapper and reducer

        Scan scan = new Scan();
        scan.setCaching(500);       // 1 is the default in Scan, which will be bad for MapReduce jobs
        scan.setCacheBlocks(false); // don't pollute the block cache with a full scan

        TableMapReduceUtil.initTableMapperJob(
                "Dataset",      // input table
                scan,           // Scan instance to control CF and attribute selection
                MyMapper.class, // mapper class
                Text.class,     // mapper output key
                Text.class,     // mapper output value
                job);
        TableMapReduceUtil.initTableReducerJob(
                "Author_paper",  // output table
                MyReducer.class, // reducer class
                job);
        job.setNumReduceTasks(1); // at least one, adjust as required

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
I get the following error:

Exception in thread "main" java.lang.NoSuchMethodError: org.apache.hadoop.yarn.api.records.URL.fromURI(Ljava/net/URI;)Lorg/apache/hadoop/yarn/api/records/URL;
    at org.apache.hadoop.mapreduce.v2.util.LocalResourceBuilder.createLocalResources(LocalResourceBuilder.java:144)
    at org.apache.hadoop.mapreduce.v2.util.MRApps.setupDistributedCache(MRApps.java:531)
    at org.apache.hadoop.mapred.LocalDistributedCacheManager.setup(LocalDistributedCacheManager.java:92)
    at org.apache.hadoop.mapred.LocalJobRunner$Job.<init>(LocalJobRunner.java:171)
    at org.apache.hadoop.mapred.LocalJobRunner.submitJob(LocalJobRunner.java:760)
    at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:253)
    at org.apache.hadoop.mapreduce.Job$11.run(Job.java:1570)
    at org.apache.hadoop.mapreduce.Job$11.run(Job.java:1567)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1889)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:1567)
    at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1588)
    at analysis.Author_ref.main(Author_ref.java:111)
I am using Hadoop 2.9 and HBase 1.2.6.1.
1 Answer
Hadoop 2.9 and HBase 1.2.x are not compatible; take a look at the Hadoop version support matrix in the HBase reference guide.
You have to use compatible versions.
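If it helps to confirm which builds actually end up on the job's classpath, both projects ship a VersionInfo class you can print from. A minimal sketch (the VersionCheck class name is just a placeholder):

public class VersionCheck {
    public static void main(String[] args) {
        // Both projects define a class named VersionInfo, so they are
        // fully qualified here to avoid the name collision.
        System.out.println("Hadoop: " + org.apache.hadoop.util.VersionInfo.getVersion());
        System.out.println("HBase:  " + org.apache.hadoop.hbase.util.VersionInfo.getVersion());
    }
}

As of the HBase 1.2 line's support matrix, Hadoop is only supported up through the 2.7.x releases, so the usual fixes are either to drop Hadoop back to 2.7.x or to move to an HBase release whose matrix lists your Hadoop version as supported.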