如何修复 NoSuchMethodError: org.apache.hadoop.mapred.InputSplit.write

ohfgkhjo  于 2021-05-29  发布在  Hadoop
关注(0)|答案(1)|浏览(274)

我正在写一个关于 Hadoop 的项目。我有一个一维字符串数组,它的名字是 "words"。
我想把它发送到 reducer,但我得到了这个错误:

Exception in thread "main" java.lang.NoSuchMethodError: org.apache.hadoop.mapred.InputSplit.write(Ljava/io/DataOutput;)V

我该怎么办?
有人能帮我吗?
这是我的Map:

public  abstract  class Mapn  implements Mapper<LongWritable, Text, Text, Text>{
@SuppressWarnings("unchecked")
public void map(LongWritable key, Text value, Context con) throws IOException, InterruptedException

        {                   
            String line = value.toString();
            String[] words=line.split(",");
            for(String word: words )
            {
                  Text outputKey = new Text(word.toUpperCase().trim());

              con.write(outputKey, words);
            }
            }

            }
h43kikqp

h43kikqp1#

当我学习的时候
hadoop mapreduce 工具时,我写了自己的程序(不只是传统的 WordCount),然后导出 jar。现在,我和你分享我基于 hadoop-1.2.1 jar 依赖写的程序。它的功能是把数字转换成对应的英文单词(原文 "4 lacs" 指最多支持约四十万以内的数字;机器翻译把 "lacs" 误译成了"拉丁美洲和加勒比海地区"),其中没有做任何单独的错误处理。
以下是程序:

package com.whodesire.count;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import com.whodesire.numstats.AmtInWords;

public class CountInWords {

    public static class NumberTokenizerMapper 
                    extends Mapper <Object, Text, LongWritable, Text> {

        private static final Text theOne = new Text("1");
        private LongWritable longWord = new LongWritable();

        public void map(Object key, Text value, Context context) {

            try{
                StringTokenizer itr = new StringTokenizer(value.toString());
                while (itr.hasMoreTokens()) {
                    longWord.set(Long.parseLong(itr.nextToken()));
                    context.write(longWord, theOne);
                }
            }catch(ClassCastException cce){
                System.out.println("ClassCastException raiseddd...");
                System.exit(0);
            }catch(IOException | InterruptedException ioe){
                ioe.printStackTrace();
                System.out.println("IOException | InterruptedException raiseddd...");
                System.exit(0);
            }
        }
    }

    public static class ModeReducerCumInWordsCounter 
            extends Reducer <LongWritable, Text, LongWritable, Text>{
        private Text result = new Text();

        //This is the user defined reducer function which is invoked for each unique key
        public void reduce(LongWritable key, Iterable<Text> values, 
                Context context) throws IOException, InterruptedException {

            /***Putting the key, which is a LongWritable value, 
                        putting in AmtInWords constructor as String***/
            AmtInWords aiw = new AmtInWords(key.toString());
            result.set(aiw.getInWords());

            //Finally the word and counting is sent to Hadoop MR and thus to target
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        /****
       ***all random numbers generated inside input files has been
       ***generated using url https://andrew.hedges.name/experiments/random/
       ****/

        //Load the configuration files and add them to the the conf object
        Configuration conf = new Configuration();       

        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

        Job job = new Job(conf, "CountInWords");

        //Specify the jar which contains the required classes for the job to run.
        job.setJarByClass(CountInWords.class);

        job.setMapperClass(NumberTokenizerMapper.class);
        job.setCombinerClass(ModeReducerCumInWordsCounter.class);
        job.setReducerClass(ModeReducerCumInWordsCounter.class);

        //Set the output key and the value class for the entire job
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);

        //Set the Input (format and location) and similarly for the output also
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

        //Setting the Results to Single Target File
        job.setNumReduceTasks(1);

        //Submit the job and wait for it to complete
        System.exit(job.waitForCompletion(true) ? 0 : 1);       
    }
}

我建议您检查您添加的 hadoop jar,特别是 hadoop-core-x.x.x.jar,因为从您的错误来看,您似乎还没有向项目添加某些 mapreduce 相关的 jar。

相关问题