hadoop java错误:线程 "main" 中出现异常 java.lang.ClassNotFoundException: com.packt.ch3.etl.ParseWeblogs

deikduxw  于 2021-06-02  发布在  Hadoop
关注(0)|答案(1)|浏览(393)

我是hadoop新手。我遵循了一些教程,并尝试在版本为2.7.0的hadoop集群中运行该示例,不幸的是,我得到了以下错误:

$ javac *.java

$ jar cvf myjar.jar *.class

$ hadoop jar ./myjar.jar com.packt.ch3.etl.ParseWeblogs /user/hadoop/apache_clf.txt /user/hadoop/apache_clf_tsv

        Exception in thread "main" java.lang.ClassNotFoundException: com.packt.ch3.etl.ParseWeblogs
            at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
            at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
            at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
            at java.lang.Class.forName0(Native Method)
            at java.lang.Class.forName(Class.java:348)
            at org.apache.hadoop.util.RunJar.run(RunJar.java:214)
            at org.apache.hadoop.util.RunJar.main(RunJar.java:136)

我的类路径设置如下:

export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$ANT_HOME/lib/ant-launcher.jar

export CLASSPATH="/usr/local/hadoop/share/hadoop/common/hadoop-common-2.7.0.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.0.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-cli-1.2.jar:$CLASSPATH"

文件 ParseWeblogs.java 的源代码如下:

package com.packt.ch3.etl;

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.conf.Configured;
        import org.apache.hadoop.fs.Path;
        import org.apache.hadoop.io.Text;
        import org.apache.hadoop.mapreduce.Job;
        import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
        import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
        import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
        import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
        import org.apache.hadoop.util.Tool;
        import org.apache.hadoop.util.ToolRunner;

        /**
         * Driver for a map-only MapReduce job that transforms Apache
         * Common Log Format (CLF) records into tab-separated values.
         * Implements {@link Tool} so that ToolRunner can parse generic
         * Hadoop options (-D, -files, ...) before {@link #run} is called.
         */
        public class ParseWeblogs extends Configured implements Tool {

            /**
             * Configures and submits the weblog-transformation job.
             *
             * @param args args[0] = HDFS input path, args[1] = HDFS output path
             *             (must not already exist)
             * @return 0 if the job completed successfully, 1 otherwise
             * @throws Exception if job configuration or submission fails
             */
            public int run(String[] args) throws Exception {

                Path inputPath = new Path(args[0]);
                Path outputPath = new Path(args[1]);

                // Use the configuration ToolRunner prepared for us.
                Configuration conf = getConf();
                // Job.getInstance replaces the deprecated Job(Configuration, String) ctor.
                Job weblogJob = Job.getInstance(conf, "Weblog Transformer");
                // Tell Hadoop which jar to ship to the cluster; without this the
                // tasks fail with ClassNotFoundException on the worker nodes.
                weblogJob.setJarByClass(ParseWeblogs.class);
                // Map-only job: mapper output is written directly, no reduce phase.
                weblogJob.setNumReduceTasks(0);
                weblogJob.setMapperClass(CLFMapper.class);
                weblogJob.setMapOutputKeyClass(Text.class);
                weblogJob.setMapOutputValueClass(Text.class);
                weblogJob.setOutputKeyClass(Text.class);
                weblogJob.setOutputValueClass(Text.class);
                weblogJob.setInputFormatClass(TextInputFormat.class);
                weblogJob.setOutputFormatClass(TextOutputFormat.class);

                FileInputFormat.setInputPaths(weblogJob, inputPath);
                FileOutputFormat.setOutputPath(weblogJob, outputPath);

                // Block until the job finishes; true = stream progress to the client.
                return weblogJob.waitForCompletion(true) ? 0 : 1;
            }

            /**
             * Entry point: delegates to ToolRunner and exits with the
             * job's status code (0 = success, 1 = failure).
             */
            public static void main( String[] args ) throws Exception {
                int returnCode = ToolRunner.run(new ParseWeblogs(), args);
                System.exit(returnCode);
            }

        }

    ++++++++++++++++++++++++++++++++++++++++

文件 CLFMapper.java 的源代码如下:

package com.packt.ch3.etl;

    import java.io.IOException;
    import java.text.ParseException;
    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.Locale;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    /**
     * Mapper that parses Apache Common Log Format (CLF) lines and emits
     * the client IP as the key and a tab-separated value containing
     * timestamp (epoch millis), page, HTTP status, byte count and user
     * agent. Lines that do not match the CLF pattern, or whose date
     * cannot be parsed, are silently skipped.
     */
    public class CLFMapper extends Mapper<Object, Text, Text, Text> {

        // CLF dates use English month abbreviations ("12/Mar/2021"); pin the
        // locale so parsing does not break on JVMs with a non-English default.
        // NOTE: SimpleDateFormat is not thread-safe, but each mapper instance
        // is confined to a single task thread, so an instance field is fine.
        private SimpleDateFormat dateFormatter = 
                new SimpleDateFormat("dd/MMM/yyyy:HH:mm:ss Z", Locale.ENGLISH);
        // Capture groups: 1=ip 2=identity 3=user 4=date 5=method 6=page
        // 7=protocol 8=status 9=bytes 10/11=referer 12/13=user agent.
        // Compiled once per mapper instance, reused for every record.
        private Pattern p = Pattern.compile("^([\\d.]+) (\\S+) (\\S+) \\[([\\w:/]+\\s[+\\-]\\d{4})\\] \"(\\w+) (.+?) (.+?)\" (\\d+) (\\d+) \"([^\"]+|(.+?))\" \"([^\"]+|(.+?))\"", Pattern.DOTALL);

        // Reused across map() calls to avoid a per-record allocation.
        private Text outputKey = new Text();
        private Text outputValue = new Text();

        @Override
        protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            String entry = value.toString();
            Matcher m = p.matcher(entry);
            if (!m.matches()) {
                return; // not a CLF record
            }
            Date date = null;
            try {
                date = dateFormatter.parse(m.group(4));
            } catch (ParseException ex) {
                return; // unparseable timestamp
            }
            outputKey.set(m.group(1)); //ip
            StringBuilder b = new StringBuilder();
            b.append(date.getTime()); //timestamp
            b.append('\t');
            b.append(m.group(6)); //page
            b.append('\t');
            b.append(m.group(8)); //http status
            b.append('\t');
            b.append(m.group(9)); //bytes
            b.append('\t');
            b.append(m.group(12)); //useragent
            outputValue.set(b.toString());
            context.write(outputKey, outputValue);
        }

    }

有谁能帮我一下吗?我试过几种解决办法,但还没想出来。谢谢!

7eumitmz

7eumitmz1#

你能试试下面这段代码吗?我做了一些修改,看看它是否有效。

package trail;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Driver for a job that transforms Apache Common Log Format records
 * into tab-separated values. Implements {@link Tool} so ToolRunner can
 * parse generic Hadoop options before {@link #run} is invoked.
 */
public class ParseWeblogs extends Configured implements Tool {

    /**
     * Entry point: delegates to ToolRunner and exits with the job's
     * status code (0 = success, 1 = failure). Letting the exception
     * propagate (instead of swallowing it) ensures a failed submission
     * is visible both on stderr and in the process exit code.
     */
    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new ParseWeblogs(), args);
        System.exit(res);
    }

    /**
     * Configures and submits the weblog-transformation job.
     *
     * @param args args[0] = input path, args[1] = output path (deleted
     *             first if it already exists)
     * @return 0 if the job completed successfully, 1 otherwise
     * @throws Exception if configuration, HDFS access or submission fails
     */
    public int run(String[] args) throws Exception {
        // Use the configuration ToolRunner prepared; creating a fresh
        // Configuration here would discard any -D generic options.
        Configuration conf = getConf();

        Path in = new Path(args[0]);
        Path out = new Path(args[1]);

        // Tool.run declares "throws Exception", so errors propagate to
        // ToolRunner instead of being swallowed and causing a later NPE
        // on a null FileSystem/Job reference.
        FileSystem fs = FileSystem.get(conf);

        // Job.getInstance replaces the deprecated Job(Configuration, String) ctor.
        Job weblogJob = Job.getInstance(conf, "ParseWeblogs");
        // Ship the jar containing this class to the cluster; omitting this
        // is the classic cause of ClassNotFoundException in the tasks.
        weblogJob.setJarByClass(ParseWeblogs.class);
        weblogJob.setMapperClass(CLFMapper.class);
        weblogJob.setMapOutputKeyClass(Text.class);
        weblogJob.setMapOutputValueClass(Text.class);
        weblogJob.setOutputKeyClass(Text.class);
        weblogJob.setOutputValueClass(Text.class);

        TextInputFormat.addInputPath(weblogJob, in);

        // Delete a pre-existing output dir; otherwise submission fails.
        if (fs.exists(out)) {
            fs.delete(out, true);
        }
        TextOutputFormat.setOutputPath(weblogJob, out);

        // Block until the job finishes; true = stream progress to the client.
        return weblogJob.waitForCompletion(true) ? 0 : 1;
    }
}

相关问题