The demo is very simple. But when I run it with hadoop jar hia-1.0-SNAPSHOT.jar cha1.CharCount, I get an exception:
Error: java.lang.RuntimeException: java.lang.ClassNotFoundException: Class cha1.CharCount$CMapper not found
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:1587)
at org.apache.hadoop.mapreduce.task.JobContextImpl.getMapperClass(JobContextImpl.java:186)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:715)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:338)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:157)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1408)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:152)
Caused by: java.lang.ClassNotFoundException: Class cha1.CharCount$CMapper not found
at org.apache.hadoop.conf.Configuration.getClassByName(Configuration.java:1493)
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:1585)
... 8 more
Here is the demo code:
package cha1;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/**
 * User: mzang
 * Date: 10/31/13
 * Time: 4:21 PM
 */
public class CharCount extends Configured implements Tool {

    class CMapper extends Mapper<LongWritable, Text, ByteWritable, LongWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            byte[] content = value.getBytes();
            Map<ByteWritable, Integer> map = new HashMap<ByteWritable, Integer>();
            for (byte b : content) {
                ByteWritable bw = new ByteWritable(b);
                Integer c = map.get(bw);
                if (c == null) {
                    map.put(bw, 1);
                } else {
                    c++;
                    map.put(bw, c);
                }
            }
            for (Map.Entry<ByteWritable, Integer> entry : map.entrySet()) {
                context.write(entry.getKey(), new LongWritable(entry.getValue()));
            }
        }
    }

    class CCombiner extends Reducer<ByteWritable, LongWritable, ByteWritable, LongWritable> {
        @Override
        protected void reduce(ByteWritable key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
            long sum = 0;
            for (LongWritable longWritable : values) {
                sum += longWritable.get();
            }
            context.write(key, new LongWritable(sum));
        }
    }

    class CReducer extends Reducer<ByteWritable, LongWritable, Text, Text> {
        @Override
        protected void reduce(ByteWritable key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
            long sum = 0;
            for (LongWritable longWritable : values) {
                sum += longWritable.get();
            }
            context.write(new Text(String.valueOf(key.get())), new Text(String.valueOf(sum)));
        }
    }

    public int run(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Job job = Job.getInstance(this.getConf());
        FileOutputFormat.setOutputPath(job, new Path("/tmp/test/output"));
        FileInputFormat.setInputPaths(job, new Path("/tmp/test/input"));
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setMapperClass(CMapper.class);
        job.setCombinerClass(CCombiner.class);
        job.setReducerClass(CReducer.class);
        job.submit();
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.addResource("hdfs-site.xml");
        int res = ToolRunner.run(conf, new CharCount(), args);
        System.exit(res);
    }
}
It is supposed to run on Apache Hadoop.
I checked the Hadoop logs. They don't say anything useful, only that many attempts failed.
Then I tried checking with hadoop fs -ls /tmp/hadoop-yarn/staging/history/, but the two directories there, done and done_intermediate, are both empty.
The jar should be distributed to HDFS and downloaded by every task tracker, right? The class CharCount$CMapper should be in the jar.
1 Answer
Make CMapper static as well as public. Since it is not static, Hadoop cannot create an instance of it: a non-static inner class can only be constructed through an enclosing instance of its outer class, CharCount.
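
A minimal sketch of the corrected declarations, assuming the method bodies stay exactly as in the question (the same change presumably applies to CCombiner and CReducer, which are declared the same way as CMapper):

public class CharCount extends Configured implements Tool {

    // static + public: Hadoop instantiates the mapper by reflection through a
    // no-argument constructor. A non-static inner class has no such constructor,
    // because it can only be built from an enclosing CharCount instance.
    public static class CMapper extends Mapper<LongWritable, Text, ByteWritable, LongWritable> {
        // map() unchanged from the question
    }

    public static class CCombiner extends Reducer<ByteWritable, LongWritable, ByteWritable, LongWritable> {
        // reduce() unchanged from the question
    }

    public static class CReducer extends Reducer<ByteWritable, LongWritable, Text, Text> {
        // reduce() unchanged from the question
    }

    // run() and main() unchanged from the question
}

As a side note not covered by this answer: run() never calls job.setJarByClass(CharCount.class), which is how MapReduce drivers usually tell the framework which jar to ship to the task JVMs; omitting it is another common cause of ClassNotFoundException in tasks.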