java - Can we put some computation task in the setup method of the mapper class in MapReduce code?

hmtdttj4  asked on 2021-05-29  in  Hadoop
Follow (0) | Answers (2) | Views (496)

I have used the setup() method in my mapper class. There is also a user-defined method, aprioriGenK(), defined in the mapper class and called from the map() method.
Now the problem is this: as far as I know, the map method is called for every line of input. If there are 100 lines, map is called 100 times, and each call to map in turn calls aprioriGenK. But there is no need to call aprioriGenK on every invocation of map; its result is the same for all input lines handled by the mapper. aprioriGenK is very CPU-intensive, so calling it repeatedly inflates the computation time. Can we arrange to call aprioriGenK only once and reuse its result in every map call? I tried moving aprioriGenK into the setup method so that it would be called only once, but surprisingly that slowed execution down considerably.
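As far as I understand, the default Mapper.run() in Hadoop is roughly equivalent to the following sketch (the class name LifecycleSketch is only for illustration, it is not part of my code), which is why setup() runs once per mapper task while map() runs once for every input record:

import java.io.IOException;

import org.apache.hadoop.mapreduce.Mapper;

// Simplified sketch of the default Mapper lifecycle:
// setup() runs once per mapper task, map() runs once per input record,
// cleanup() runs once at the end of the task.
public class LifecycleSketch<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
        extends Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
{
    @Override
    public void run(Context context) throws IOException, InterruptedException
    {
        setup(context);                     // once per mapper task
        try
        {
            while (context.nextKeyValue())  // once per input record
            {
                map(context.getCurrentKey(), context.getCurrentValue(), context);
            }
        }
        finally
        {
            cleanup(context);               // once per mapper task
        }
    }
}

So whatever I put in setup() should execute only once per mapper task, not once per record.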
Here is my code:

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.util.LinkedList;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.StringUtils;

import dataStructuresV2.ItemsetTrie;

public class AprioriTrieMapper extends Mapper<Object, Text, Text, IntWritable>
{
    public static enum State
    {
        UPDATED
    }

    private final static IntWritable one = new IntWritable(1);
    private Text itemset = new Text();
    private Configuration conf;
    private StringTokenizer fitemset;     // stores one line of the previous output file of frequent itemsets
    private ItemsetTrie trieLk_1 = null;  // prefix tree storing candidate (k-1)-itemsets of the previous pass
    private int k;                        // itemset size, i.e. iteration no.
    // private ItemsetTrie trieCk = null; // prefix tree to store candidate k-itemsets

    public void setup(Context context) throws IOException, InterruptedException
    {
        conf = context.getConfiguration();
        URI[] previousOutputURIs = Job.getInstance(conf).getCacheFiles();
        k = conf.getInt("k", k);
        trieLk_1 = new ItemsetTrie();

        for (URI previousOutputURI : previousOutputURIs)
        {
            Path previousOutputPath = new Path(previousOutputURI.getPath());
            String previousOutputFileName = previousOutputPath.getName().toString();
            filterItemset(previousOutputFileName, trieLk_1);
        }
        // trieCk = aprioriGenK(trieLk_1, k-1); // candidate generation from prefix tree of size k-1
    } // end method setup

    // trim the count from each line and store only the itemset
    private void filterItemset(String fileName, ItemsetTrie trieLk_1)
    {
        try
        {
            BufferedReader fis = new BufferedReader(new FileReader(fileName));
            String line = null;
            // trieLk_1 = new ItemsetTrie();

            while ((line = fis.readLine()) != null)
            {
                fitemset = new StringTokenizer(line, "\t");
                trieLk_1.insertCandidateItemset(fitemset.nextToken());
            }
            fis.close();
        }
        catch (IOException ioe)
        {
            System.err.println("Caught exception while parsing the cached file '" + fileName + "' : "
                    + StringUtils.stringifyException(ioe));
        }
    } // end method filterItemset

    public void map(Object key, Text value, Context context) throws IOException, InterruptedException
    {
        StringTokenizer items = new StringTokenizer(value.toString().toLowerCase(), " \t\n\r\f,.:;?![]'"); // tokenize transaction
        LinkedList<String> itemlist = new LinkedList<String>(); // stores the tokens (items) of the transaction
        LinkedList<String> listCt;     // subsets of the transaction that are candidates
        // Map<String, Integer> mapCt; // subsets of the transaction that are candidates, with support count
        ItemsetTrie trieCk = null;     // prefix tree to store candidate k-itemsets
        StringTokenizer candidate;

        trieCk = aprioriGenK(trieLk_1, k-1); // candidate generation from prefix tree of size k-1
        if (trieCk.numberOfCandidate() > 0)
            context.getCounter(State.UPDATED).increment(1); // increment counter

        // optimization: if the transaction has fewer items than the candidate size, it need not be checked
        if (items.countTokens() >= k)
        {
            while (items.hasMoreTokens()) // add tokens of the transaction to the list
                itemlist.add(items.nextToken());

            // we use either the simple linked list listCt or the map mapCt
            listCt = trieCk.candidateSupportCount1(itemlist, k);
            for (String listCtMember : listCt) // generate (key, value) pairs from listCt
            {
                candidate = new StringTokenizer(listCtMember, "\n");
                if (candidate.hasMoreTokens())
                {
                    itemset.set(candidate.nextToken());
                    context.write(itemset, one);
                }
            }
        } // end if
    } // end method map

    // generate the candidate prefix tree of size k using the prefix tree of size k-1
    public ItemsetTrie aprioriGenK(ItemsetTrie trieLk_1, int itemsetSize) // itemsetSize of trie Lk_1
    {
        ItemsetTrie candidateTree = new ItemsetTrie();      // local prefix tree storing candidate k-itemsets
        trieLk_1.candidateGenK(candidateTree, itemsetSize); // new candidate prefix tree obtained
        return candidateTree;                               // return prefix tree of size k
    } // end method aprioriGenK
} // end class AprioriTrieMapper

Here is my driver class:
package aprioriBasedAlgorithms;

import java.net.URI;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.log4j.Logger;

public class AprioriTrie
{
    private static Logger log = Logger.getLogger(AprioriTrie.class);

    public static void main(String[] args) throws Exception
    {
        Configuration conf = new Configuration();
        // String minsup = "1";
        String minsup = null;
        List<String> otherArgs = new ArrayList<String>();

        for (int i = 0; i < args.length; ++i)
        {
            if ("-minsup".equals(args[i]))
                minsup = args[++i];
            else
                otherArgs.add(args[i]);
        }
        conf.set("min_sup", minsup);

        log.info("Started counting 1-itemsets ....................");
        Date date; long startTime, endTime;            // for recording start and end time of the jobs
        date = new Date(); startTime = date.getTime(); // start timer

        // Phase 1
        Job job = Job.getInstance(conf, "AprioriTrie: Iteration-1");
        job.setJarByClass(aprioriBasedAlgorithms.AprioriTrie.class);
        job.setMapperClass(OneItemsetMapper.class);
        job.setCombinerClass(OneItemsetCombiner.class);
        job.setReducerClass(OneItemsetReducer.class);
        // job.setOutputKeyClass(Text.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntWritable.class);
        job.setInputFormatClass(NLineInputFormat.class);
        NLineInputFormat.setNumLinesPerSplit(job, 10000); // set a specific number of records per split

        // Path inputPath = new Path("hdfs://hadoopmaster:9000/user/hduser/sample-transactions1/");
        Path inputPath = new Path(otherArgs.get(0));
        // Path outputPath = new Path("hdfs://hadoopmaster:9000/user/hduser/AprioriTrie/fis-1");
        Path outputPath = new Path(otherArgs.get(1) + "/fis-1");
        FileInputFormat.setInputPaths(job, inputPath);
        FileOutputFormat.setOutputPath(job, outputPath);

        if (job.waitForCompletion(true))
            log.info("SUCCESSFULLY- Completed Frequent 1-itemsets Generation.");
        else
            log.info("ERROR- Completed Frequent 1-itemsets Generation.");

        // Phase k >= 2
        int iteration = 1; long counter;
        do
        {
            Configuration conf2 = new Configuration();
            conf2.set("min_sup", minsup);
            conf2.setInt("k", iteration + 1);

            log.info("Started counting " + (iteration + 1) + "-itemsets ..................");
            Job job2 = Job.getInstance(conf2, "AprioriTrie: Iteration-" + (iteration + 1));
            job2.setJarByClass(aprioriBasedAlgorithms.AprioriTrie.class);
            job2.setMapperClass(AprioriTrieMapper.class);
            job2.setCombinerClass(ItemsetCombiner.class);
            job2.setReducerClass(ItemsetReducer.class);
            job2.setOutputKeyClass(Text.class);
            job2.setOutputValueClass(IntWritable.class);
            job2.setNumReduceTasks(4); // split the output across 4 reduce tasks
            job2.setInputFormatClass(NLineInputFormat.class);
            NLineInputFormat.setNumLinesPerSplit(job2, 10000);

            FileSystem fs = FileSystem.get(new URI("hdfs://hadoopmaster:9000"), conf2);
            // FileStatus[] status = fs.listStatus(new Path("hdfs://hadoopmaster:9000/user/hduser/AprioriTrie/fis-"+iteration+"/"));
            FileStatus[] status = fs.listStatus(new Path(otherArgs.get(1) + "/fis-" + iteration));
            for (int i = 0; i < status.length; i++)
            {
                job2.addCacheFile(status[i].getPath().toUri()); // add all files of the previous iteration's output
                // job2.addFileToClassPath(status[i].getPath());
            }

            // the input is the same for these jobs
            // outputPath = new Path("hdfs://hadoopmaster:9000/user/hduser/AprioriTrie/fis-"+(iteration+1));
            outputPath = new Path(otherArgs.get(1) + "/fis-" + (iteration + 1));
            FileInputFormat.setInputPaths(job2, inputPath);
            FileOutputFormat.setOutputPath(job2, outputPath);

            if (job2.waitForCompletion(true))
                log.info("SUCCESSFULLY- Completed Frequent " + (iteration + 1) + "-itemsets Generation.");
            else
                log.info("ERROR- Completed Frequent " + (iteration + 1) + "-itemsets Generation.");

            iteration++;
            counter = job2.getCounters().findCounter(AprioriTrieMapper.State.UPDATED).getValue();
        } while (counter > 0);

        date = new Date(); endTime = date.getTime(); // end timer
        log.info("Total Time (in milliseconds) = " + (endTime - startTime));
        log.info("Total Time (in seconds) = " + (endTime - startTime) * 0.001F);
    }
}

ikfrs5lh1#

You can add the function call to the mapper's run() method, right after the call to setup(). That ensures your method is called only once per mapper.

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class Mymapper extends Mapper<LongWritable, Text, Text, IntWritable>
{
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException
    {
        // do something
    }

    public void myfunc(String parm)
    {
        System.out.println("parm=" + parm);
    }

    @Override
    public void run(Context context) throws IOException, InterruptedException
    {
        setup(context);
        myfunc("hello"); // runs once per mapper task, before any map() call

        while (context.nextKeyValue())
        {
            map(context.getCurrentKey(), context.getCurrentValue(), context);
        }
        cleanup(context); // keep the final cleanup() call of the default lifecycle
    }
}
brtdzjyr2#

I made the change in my mapper class, but the resulting code is very slow, which still seems to be related to aprioriGenK().
Here is my modified code:

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.util.LinkedList;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.StringUtils;

import dataStructuresV2.ItemsetTrie;

public class AprioriTrieMapper extends Mapper<Object, Text, Text, IntWritable>
{
    public static enum State
    {
        UPDATED
    }

    private final static IntWritable one = new IntWritable(1);
    private Text itemset = new Text();
    private Configuration conf;
    private StringTokenizer fitemset;     // stores one line of the previous output file of frequent itemsets
    private ItemsetTrie trieLk_1 = null;  // prefix tree storing candidate (k-1)-itemsets of the previous pass
    private int k;                        // itemset size, i.e. iteration no.
    private ItemsetTrie trieCk = null;    // prefix tree to store candidate k-itemsets

    public void setup(Context context) throws IOException, InterruptedException
    {
        conf = context.getConfiguration();
        URI[] previousOutputURIs = Job.getInstance(conf).getCacheFiles();
        k = conf.getInt("k", k);
        trieLk_1 = new ItemsetTrie();

        for (URI previousOutputURI : previousOutputURIs)
        {
            Path previousOutputPath = new Path(previousOutputURI.getPath());
            String previousOutputFileName = previousOutputPath.getName().toString();
            filterItemset(previousOutputFileName, trieLk_1);
        }
        // trieCk = aprioriGenK(trieLk_1, k-1); // candidate generation from prefix tree of size k-1
    } // end method setup

    // trim the count from each line and store only the itemset
    private void filterItemset(String fileName, ItemsetTrie trieLk_1)
    {
        try
        {
            BufferedReader fis = new BufferedReader(new FileReader(fileName));
            String line = null;
            // trieLk_1 = new ItemsetTrie();

            while ((line = fis.readLine()) != null)
            {
                fitemset = new StringTokenizer(line, "\t");
                trieLk_1.insertCandidateItemset(fitemset.nextToken());
            }
            fis.close();
        }
        catch (IOException ioe)
        {
            System.err.println("Caught exception while parsing the cached file '" + fileName + "' : "
                    + StringUtils.stringifyException(ioe));
        }
    } // end method filterItemset

    // run method
    @Override
    public void run(Context context) throws IOException, InterruptedException
    {
        setup(context);
        trieCk = aprioriGenK(trieLk_1, k-1); // candidate generation from prefix tree of size k-1, once per mapper
        if (trieCk.numberOfCandidate() > 0)
            context.getCounter(State.UPDATED).increment(1); // increment counter

        while (context.nextKeyValue())
        {
            map(context.getCurrentKey(), context.getCurrentValue(), context);
        }
        cleanup(context); // keep the final cleanup() call of the default lifecycle
    } // end method run

    public void map(Object key, Text value, Context context) throws IOException, InterruptedException
    {
        StringTokenizer items = new StringTokenizer(value.toString().toLowerCase(), " \t\n\r\f,.:;?![]'"); // tokenize transaction
        LinkedList<String> itemlist = new LinkedList<String>(); // stores the tokens (items) of the transaction
        LinkedList<String> listCt;     // subsets of the transaction that are candidates
        // Map<String, Integer> mapCt; // subsets of the transaction that are candidates, with support count
        // ItemsetTrie trieCk = null;  // prefix tree to store candidate k-itemsets
        StringTokenizer candidate;

        // if(context.getCounter(State.UPDATED).getValue() == 0)
        // {
        //     trieCk = aprioriGenK(trieLk_1, k-1); // candidate generation from prefix tree of size k-1
        //     if(trieCk.numberOfCandidate() > 0)
        //         context.getCounter(State.UPDATED).increment(1); // increment counter
        // }

        // optimization: if the transaction has fewer items than the candidate size, it need not be checked
        if (items.countTokens() >= k)
        {
            while (items.hasMoreTokens()) // add tokens of the transaction to the list
                itemlist.add(items.nextToken());

            // we use either the simple linked list listCt or the map mapCt
            listCt = trieCk.candidateSupportCount1(itemlist, k);
            for (String listCtMember : listCt) // generate (key, value) pairs from listCt
            {
                candidate = new StringTokenizer(listCtMember, "\n");
                if (candidate.hasMoreTokens())
                {
                    itemset.set(candidate.nextToken());
                    context.write(itemset, one);
                }
            }
        } // end if
    } // end method map

    // generate the candidate prefix tree of size k using the prefix tree of size k-1
    public ItemsetTrie aprioriGenK(ItemsetTrie trieLk_1, int itemsetSize) // itemsetSize of trie Lk_1
    {
        ItemsetTrie candidateTree = new ItemsetTrie();      // local prefix tree storing candidate k-itemsets
        trieLk_1.candidateGenK(candidateTree, itemsetSize); // new candidate prefix tree obtained
        return candidateTree;                               // return prefix tree of size k
    } // end method aprioriGenK
} // end class AprioriTrieMapper