CombineFileInputFormat always launches only one map (Hadoop 1.2.1)

5anewei6 · posted 2021-06-03 in Hadoop

I am trying to test CombineFileInputFormat to process a number of small files of about 8 MB each (20 files). I followed the example given in a blog post and was able to implement and test it, and the final result is correct. But to my surprise, it always ends up with a single map. I tried setting the property mapred.max.split.size to various values such as 16 MB, 32 MB, etc. (in bytes, of course), without any success. Is there anything else I need to do, or is this the correct behavior?
I am running a two-node cluster with the default replication factor of 2. The code I developed is below. Any help is greatly appreciated.

package inverika.test.retail;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
import org.apache.hadoop.mapreduce.Reducer;

public class CategoryCount {

    public static class CategoryMapper
            extends Mapper<LongWritable, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private String[] columns = new String[8];

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            columns = value.toString().split(",");
            context.write(new Text(columns[4]), one);
        }
    }

    public static class CategoryReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        if (args.length != 2) {
            System.err.println("Usage: CategoryCount <input Path> <output Path>");
            System.exit(-1);
        }

        Configuration conf = new Configuration();
        conf.set("mapred.textoutputformat.separator", ",");
        conf.set("mapred.max.split.size", "16777216");   // 16 MB

        Job job = new Job(conf, "Retail Category Count");
        job.setJarByClass(CategoryCount.class);
        job.setMapperClass(CategoryMapper.class);
        job.setReducerClass(CategoryReducer.class);
        job.setInputFormatClass(CombinedInputFormat.class);
        //CombineFileInputFormat.setMaxInputSplitSize(job, 16777216);
        CombinedInputFormat.setMaxInputSplitSize(job, 16777216);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]) );
        FileOutputFormat.setOutputPath(job, new Path(args[1]) );
        //job.submit();
        //System.exit(job.waitForCompletion(false) ?  0 : 1);
        System.exit(job.waitForCompletion(true) ?  0 : 1);
    }
}

Below is the CombineFileInputFormat implementation:

package inverika.test.retail;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;

public class CombinedInputFormat extends CombineFileInputFormat<LongWritable, Text> {

    @Override
    public RecordReader<LongWritable, Text>
            createRecordReader(InputSplit split, TaskAttemptContext context)
                    throws IOException {

        CombineFileRecordReader<LongWritable, Text> reader = 
                new CombineFileRecordReader<LongWritable, Text>(
                        (CombineFileSplit) split, context, myCombineFileRecordReader.class);        
        return reader;
    }

    public static class myCombineFileRecordReader extends RecordReader<LongWritable, Text> {
        private LineRecordReader lineRecordReader = new LineRecordReader();

        public myCombineFileRecordReader(CombineFileSplit split, 
                TaskAttemptContext context, Integer index) throws IOException {

            FileSplit fileSplit = new FileSplit(split.getPath(index), 
                                                split.getOffset(index),
                                                split.getLength(index), 
                                                split.getLocations());
            lineRecordReader.initialize(fileSplit, context);
        }

        @Override
        public void initialize(InputSplit inputSplit, TaskAttemptContext context)
                throws IOException, InterruptedException {
            // The wrapped LineRecordReader is already initialized in the constructor,
            // so there is nothing to do here.
        }

        @Override
        public void close() throws IOException {
            lineRecordReader.close();
        }

        @Override
        public float getProgress() throws IOException {
            return lineRecordReader.getProgress();
        }

        @Override
        public LongWritable getCurrentKey() throws IOException,
                InterruptedException {
            return lineRecordReader.getCurrentKey();
        }

        @Override
        public Text getCurrentValue() throws IOException, InterruptedException {
            return lineRecordReader.getCurrentValue();
        }

        @Override
        public boolean nextKeyValue() throws IOException, InterruptedException {
            return lineRecordReader.nextKeyValue();
        }        
    }
}

ecbunoof1#

You need to set the maximum split size when you use CombineFileInputFormat as the input format class; otherwise you may end up with only one mapper when all the blocks come from the same rack.
You can achieve that in one of the following ways:
call the CombineFileInputFormat.setMaxSplitSize() method (see the sketch below)
set the mapreduce.input.fileinputformat.split.maxsize or mapred.max.split.size (deprecated) configuration parameter
For example, by issuing the following call

job.getConfiguration().setLong("mapreduce.input.fileinputformat.split.maxsize", (long)(256*1024*1024));

you would be setting the maximum split size to 256 MB.
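
For the first option, note that setMaxSplitSize() is a protected method of CombineFileInputFormat, so it is typically called from inside your own subclass. Here is a minimal sketch (the no-arg constructor and the 128 MB value are illustrative, not taken from the original post):

package inverika.test.retail;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;

public class CombinedInputFormat extends CombineFileInputFormat<LongWritable, Text> {

    public CombinedInputFormat() {
        // setMaxSplitSize() is protected, so it has to be called from the subclass itself.
        // 128 MB is only an example value; tune it to your file and block sizes.
        setMaxSplitSize(128 * 1024 * 1024);
    }

    // createRecordReader(...) remains exactly as in the question.
}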
References:
https://hadoop.apache.org/docs/r2.2.0/api/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.html
http://mail-archives.apache.org/mod_mbox/hadoop-common-user/201004.mbox/%3c35374.30384.qm@web63402.mail.re1.yahoo.com%3e


h79rfbju2#

If maxSplitSize is specified when CombineFileInputFormat is used, blocks on the same node are combined to form a single split. In your scenario all the files appear to reside on the same node, so they end up in a single split, and therefore in a single mapper.
For more details, see the CombineFileInputFormat documentation: https://hadoop.apache.org/docs/current/api/org/apache/hadoop/mapred/lib/CombineFileInputFormat.html
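
If you want to check how many map tasks a given setting will produce without running the whole job, you can ask the input format for its splits up front. A rough sketch (the printSplitCount helper is hypothetical, not part of the original code); call it from the driver after the input paths have been added, e.g. right before job.waitForCompletion(true):

// Hypothetical helper for CategoryCount (requires java.util.List and
// org.apache.hadoop.mapreduce.InputSplit imports).
public static void printSplitCount(Job job) throws IOException, InterruptedException {
    // getSplits() runs the same split computation the framework uses, so the size
    // of the returned list equals the number of map tasks that will be launched.
    List<InputSplit> splits = new CombinedInputFormat().getSplits(job);
    System.out.println("Combined splits (map tasks): " + splits.size());
    for (InputSplit split : splits) {
        System.out.println("  split length = " + split.getLength() + " bytes");
    }
}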
