I am trying to search a text file for a particular string and count its occurrences, but after running this code I get a ClassCastException between io.LongWritable and io.Text.
Error: java.lang.ClassCastException: org.apache.hadoop.io.LongWritable cannot be cast to org.apache.hadoop.io.Text
at searchaString.SearchDriver$searchMap.map(SearchDriver.java:1)
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:146)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:168)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:162)
16/04/30 02:48:17 INFO mapreduce.Job:  map 0% reduce 0%
16/04/30 02:48:23 INFO mapreduce.Job: Task Id : attempt_146163007194_0021_m_000000_2, Status : FAILED
Error: java.lang.ClassCastException: org.apache.hadoop.io.LongWritable cannot be cast to org.apache.hadoop.io.Text
package samples.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
//import org.apache.hadoop.util.GenericOptionsParser;
//import org.apache.hadoop.mapred.lib.NLineInputFormat;

import java.io.IOException;
import java.util.Iterator;

public class WordCount {

    public static void main(String[] args) throws Exception {
        @SuppressWarnings("unused")
        JobClient jobC = new JobClient();
        Configuration conf = new Configuration();
        //String args[] = parser.getRemainingArgs();
        Job job = Job.getInstance(conf);
        job.setJobName("WordCount");
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setReducerClass(IntSumReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        //job.setInputFormatClass(TextInputFormat.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        /*String MyWord = args[2];
        TokenizerMapper.find = MyWord;*/
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    public static class TokenizerMapper extends Mapper<Text, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        // private Text word = new Text();
        static String find = "txt was not created";
        public int i;

        public void map(Text key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException, InterruptedException {
            String cleanLine = value.toString();
            String[] cleanL = cleanLine.split("home");
            output.collect(new Text(cleanL[1]), one);
        }
    }

    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException, InterruptedException {
            int sum = 0;
            String wordText = "txt was not created";
            while (values.hasNext()) {
                Boolean check = values.toString().contains("txt was not created");
                if (check) {
                    String[] cleanL = values.toString().split("\\.");
                    for (String w : cleanL) {
                        if (w.length() >= wordText.length()) {
                            String wrd = w.substring(0, wordText.length());
                            if (wrd.equals(wordText)) {
                                IntWritable value = values.next();
                                sum += value.get();
                            }
                        }
                    }
                }
            }
            output.collect(key, new IntWritable(sum));
        }
    }
}
I am new to MapReduce and do not know what to do about this.
My text file looks like this:
tab/hdhdhd/hip/home.slkj.skjdh.dgsyququ/djkdjjd.*txt was not created, and I have to search for that particular text.
Please reply. If you share a solution, please briefly explain what I should change in the code.
Thanks.
2 Answers

Answer 1 (xe55xuns1#)
Your mapper: public class TokenizerMapper extends Mapper<…>
while your write method is cont.write(new Text(cleanL[1]), one);
"one" is not the same type. Either change your signature so that the mapper's output value type is Text and write like
cont.write(new Text(cleanL[1]), new Text("one"));
or
keep IntWritable as the output value type and write as
cont.write(new Text(cleanL[1]), new IntWritable(1));
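For illustration, here is a minimal sketch of the two options under the new org.apache.hadoop.mapreduce API; the class names, the guard on the split result, and the Object input key are assumptions made only so the example compiles (the input key type is discussed in the second answer below):

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Option 1: the fourth type parameter is Text, so a Text value is written.
class TextValueMapper extends Mapper<Object, Text, Text, Text> {
    @Override
    protected void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] parts = value.toString().split("home");
        if (parts.length > 1) {
            context.write(new Text(parts[1]), new Text("one")); // value type matches Text
        }
    }
}

// Option 2: the fourth type parameter is IntWritable, so an IntWritable value is written.
class IntValueMapper extends Mapper<Object, Text, Text, IntWritable> {
    private static final IntWritable one = new IntWritable(1);

    @Override
    protected void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] parts = value.toString().split("home");
        if (parts.length > 1) {
            context.write(new Text(parts[1]), one); // value type matches IntWritable
        }
    }
}

Note that with option 1 the driver call job.setMapOutputValueClass(IntWritable.class) would also have to become Text.class so the declared types stay consistent.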
Answer 2 (ezykj2lf2#)
You have given the signature of the mapper class as follows:
public static class TokenizerMapper extends Mapper<Text, Text, Text, IntWritable>
The input key of the map method is the byte offset of the line. For example, if the following is the content of your file:
hello world!
the map function will get the byte offset of the first line as the key and "hello world!" as the value. The byte offset is a long kind of value.
Change the input key to LongWritable.
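A minimal sketch of that change, assuming the default TextInputFormat (which hands the mapper a LongWritable byte offset as the key and the line as a Text value) and the new org.apache.hadoop.mapreduce API, where Context replaces the old OutputCollector/Reporter parameters so the method actually overrides Mapper.map; the guard on the split result is an added assumption:

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    private static final IntWritable one = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // key is the byte offset of the current line; value is the line itself
        String[] parts = value.toString().split("home");
        if (parts.length > 1) {
            context.write(new Text(parts[1]), one);
        }
    }
}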