Hadoop 2.6.0 HA: master NameNode fails to start

qgelzfjb · posted 2021-07-15 in Hadoop

[Error message: screenshot in the original post]
[Port information: screenshot in the original post]
Formatting ZooKeeper and the NameNodes all succeeded.
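For reference, formatting followed roughly the usual HA sequence sketched below (a sketch, not the exact session; it assumes JournalNodes run on hadoop1 through hadoop3, matching dfs.namenode.shared.edits.dir):

  # On hadoop1, hadoop2 and hadoop3: start the JournalNodes first,
  # because formatting the NameNode writes to the shared edits directory
  hadoop-daemon.sh start journalnode

  # On hadoop1 (nn1): format and start the first NameNode
  hdfs namenode -format
  hadoop-daemon.sh start namenode

  # On hadoop2 (nn2): copy the freshly formatted metadata
  # instead of formatting a second time
  hdfs namenode -bootstrapStandby

  # On hadoop1: initialize the HA state znode in ZooKeeper
  hdfs zkfc -formatZK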
I checked the port, and no process is occupying it; the check is sketched below.
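The check looked roughly like this, run on hadoop1 (either listing command works; netstat needs the net-tools package):

  # Look for any process already listening on the HTTP port
  netstat -tlnp | grep 50070
  lsof -i :50070

  # A bad address mapping can also surface as "port in use", so confirm
  # that hadoop1 resolves to the machine's real IP, not 127.0.0.1
  grep hadoop1 /etc/hosts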
I first hit this problem when starting the NameNode; even after deleting the tmp folder and reformatting several times, the log keeps saying the master NameNode cannot start: port in use 50070. Below is my hdfs-site.xml configuration:
hadoop1 (master):

  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.nameservices</name>
    <value>gy-cluster</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.gy-cluster</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.gy-cluster.nn1</name>
    <value>hadoop1:9000</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.gy-cluster.nn2</name>
    <value>hadoop2:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.gy-cluster.nn1</name>
    <value>hadoop1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.gy-cluster.nn2</name>
    <value>hadoop2:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://hadoop1:8485;hadoop2:8485;hadoop3:8485/gy-cluster</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.gy-cluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>shell(/bin/true)</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/hadoop/hdfs/tmp/journal</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/home/hadoop/hdfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/home/hadoop/hdfs/data</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.permissions.enable</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.ha.namenode.id</name>
    <value>nn1</value>
  </property>

hadoop2 (master):

The file is identical to hadoop1's except for the last property, which names this node's own NameNode ID:

  <property>
    <name>dfs.ha.namenode.id</name>
    <value>nn2</value>
  </property>

hadoop3 (slave):

Again identical to hadoop1's file, except that the dfs.ha.namenode.id property is omitted entirely, since hadoop3 runs no NameNode (it hosts only a JournalNode and a DataNode).

If anyone knows what is going on here, please let me know; thank you very much.
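For anyone who wants the full stack trace, it can be pulled from the NameNode log on hadoop1 (the log file name depends on the user running the daemon; root is assumed here, given the fencing key path above):

  # Last lines of the master NameNode log, where the bind error appears
  tail -n 100 $HADOOP_HOME/logs/hadoop-root-namenode-hadoop1.log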
