设为首页 收藏本站
查看: 710|回复: 0

[经验分享] Hadoop报错“could only be replicated to 0 nodes, instead of 1” .

[复制链接]
累计签到:1 天
连续签到:1 天
发表于 2016-12-10 11:22:27 | 显示全部楼层 |阅读模式
Hadoop报错“could only be replicated to 0 nodes, instead of 1”
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop fs -put conf input10/07/18 12:31:05 INFO hdfs.DFSClient: org.apache.hadoop.ipc.RemoteException:java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1287)
at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:351)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:481)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:894)

at org.apache.hadoop.ipc.Client.call(Client.java:697)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:216)
at $Proxy0.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy0.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:2823)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2705)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:1996)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2182)

10/07/18 12:31:05 WARN hdfs.DFSClient: NotReplicatedYetException sleeping /user/root/input/log4j.properties retries left 4
10/07/18 12:31:05 INFO hdfs.DFSClient: org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1287)
at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:351)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:481)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:894)

at org.apache.hadoop.ipc.Client.call(Client.java:697)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:216)
at $Proxy0.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy0.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:2823)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2705)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:1996)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2182)

10/07/18 12:31:05 WARN hdfs.DFSClient: NotReplicatedYetException sleeping /user/root/input/log4j.properties retries left 3
10/07/18 12:31:06 INFO hdfs.DFSClient: org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1287)
at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:351)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:481)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:894)

at org.apache.hadoop.ipc.Client.call(Client.java:697)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:216)
at $Proxy0.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy0.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:2823)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2705)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:1996)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2182)

10/07/18 12:31:06 WARN hdfs.DFSClient: NotReplicatedYetException sleeping /user/root/input/log4j.properties retries left 2
10/07/18 12:31:08 INFO hdfs.DFSClient: org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1287)
at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:351)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:481)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:894)

at org.apache.hadoop.ipc.Client.call(Client.java:697)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:216)
at $Proxy0.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy0.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:2823)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2705)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:1996)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2182)

10/07/18 12:31:08 WARN hdfs.DFSClient: NotReplicatedYetException sleeping /user/root/input/log4j.properties retries left 1
10/07/18 12:31:11 WARN hdfs.DFSClient: DataStreamer Exception: org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1287)
at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:351)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:481)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:894)

at org.apache.hadoop.ipc.Client.call(Client.java:697)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:216)
at $Proxy0.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy0.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:2823)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2705)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:1996)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2182)

10/07/18 12:31:11 WARN hdfs.DFSClient: Error Recovery for block null bad datanode[0] nodes == null
10/07/18 12:31:11 WARN hdfs.DFSClient: Could not get block locations. Source file "/user/root/input/log4j.properties" - Aborting...
put: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1

  
  好长的一段错误代码,呵呵。刚碰到这个问题的时候上网搜了一下,也没有一个很标准的解决方法。大致上说是由于不一致状态导致的。
  办法倒是有一个,只不过会丢失掉已有数据,请慎重使用。
  1、先把服务都停掉
  2、格式化namenode
  3、重新启动所有服务
  4、可以进行正常操作了
  下面是我的解决步骤
  root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/stop-all.sh
stopping jobtracker
localhost: stopping tasktracker
no namenode to stop
localhost: no datanode to stop
localhost: stopping secondarynamenode
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop namenode -format
10/07/18 12:46:23 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = scutshuxue-desktop/127.0.1.1
STARTUP_MSG: args = [-format]
STARTUP_MSG: version = 0.19.2
STARTUP_MSG: build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.19 -r 789657; compiled by 'root' on Tue Jun 30 12:40:50 EDT 2009
************************************************************/
Re-format filesystem in /tmp/hadoop-root/dfs/name ? (Y or N) Y
10/07/18 12:46:24 INFO namenode.FSNamesystem: fsOwner=root,root
10/07/18 12:46:24 INFO namenode.FSNamesystem: supergroup=supergroup
10/07/18 12:46:24 INFO namenode.FSNamesystem: isPermissionEnabled=true
10/07/18 12:46:25 INFO common.Storage: Image file of size 94 saved in 0 seconds.
10/07/18 12:46:25 INFO common.Storage: Storage directory /tmp/hadoop-root/dfs/name has been successfully formatted.
10/07/18 12:46:25 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at scutshuxue-desktop/127.0.1.1
************************************************************/
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# ls
bin docs lib README.txt
build.xml hadoop-0.19.2-ant.jar libhdfs src
c++ hadoop-0.19.2-core.jar librecordio test-txt
CHANGES.txt hadoop-0.19.2-examples.jar LICENSE.txt webapps
conf hadoop-0.19.2-test.jar logs
contrib hadoop-0.19.2-tools.jar NOTICE.txt
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/start-all.sh
starting namenode, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-namenode-scutshuxue-desktop.out
localhost: starting datanode, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-datanode-scutshuxue-desktop.out
localhost: starting secondarynamenode, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-secondarynamenode-scutshuxue-desktop.out
starting jobtracker, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-jobtracker-scutshuxue-desktop.out
localhost: starting tasktracker, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-tasktracker-scutshuxue-desktop.out
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop fs -put conf input
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop dfs -ls
Found 1 items
drwxr-xr-x - root supergroup 0 2010-07-18 12:47 /user/root/input


  
  好长的一段错误代码,呵呵。刚碰到这个问题的时候上网搜了一下,也没有一个很标准的解决方法。大致上说是由于不一致状态导致的。
  办法倒是有一个,只不过会丢失掉已有数据,请慎重使用。
  1、先把服务都停掉
  2、格式化namenode
  3、重新启动所有服务
  4、可以进行正常操作了
  下面是我的解决步骤
  root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/stop-all.sh
stopping jobtracker
localhost: stopping tasktracker
no namenode to stop
localhost: no datanode to stop
localhost: stopping secondarynamenode
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop namenode -format
10/07/18 12:46:23 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = scutshuxue-desktop/127.0.1.1
STARTUP_MSG: args = [-format]
STARTUP_MSG: version = 0.19.2
STARTUP_MSG: build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.19 -r 789657; compiled by 'root' on Tue Jun 30 12:40:50 EDT 2009
************************************************************/
Re-format filesystem in /tmp/hadoop-root/dfs/name ? (Y or N) Y
10/07/18 12:46:24 INFO namenode.FSNamesystem: fsOwner=root,root
10/07/18 12:46:24 INFO namenode.FSNamesystem: supergroup=supergroup
10/07/18 12:46:24 INFO namenode.FSNamesystem: isPermissionEnabled=true
10/07/18 12:46:25 INFO common.Storage: Image file of size 94 saved in 0 seconds.
10/07/18 12:46:25 INFO common.Storage: Storage directory /tmp/hadoop-root/dfs/name has been successfully formatted.
10/07/18 12:46:25 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at scutshuxue-desktop/127.0.1.1
************************************************************/
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# ls
bin docs lib README.txt
build.xml hadoop-0.19.2-ant.jar libhdfs src
c++ hadoop-0.19.2-core.jar librecordio test-txt
CHANGES.txt hadoop-0.19.2-examples.jar LICENSE.txt webapps
conf hadoop-0.19.2-test.jar logs
contrib hadoop-0.19.2-tools.jar NOTICE.txt
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/start-all.sh
starting namenode, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-namenode-scutshuxue-desktop.out
localhost: starting datanode, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-datanode-scutshuxue-desktop.out
localhost: starting secondarynamenode, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-secondarynamenode-scutshuxue-desktop.out
starting jobtracker, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-jobtracker-scutshuxue-desktop.out
localhost: starting tasktracker, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-tasktracker-scutshuxue-desktop.out
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop fs -put conf input
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop dfs -ls
Found 1 items
drwxr-xr-x - root supergroup 0 2010-07-18 12:47 /user/root/input

运维网声明 1、欢迎大家加入本站运维交流群:群②:261659950 群⑤:202807635 群⑦870801961 群⑧679858003
2、本站所有主题由该帖子作者发表,该帖子作者与运维网享有帖子相关版权
3、所有作品的著作权均归原作者享有,请您和我们一样尊重他人的著作权等合法权益。如果您对作品感到满意,请购买正版
4、禁止制作、复制、发布和传播具有反动、淫秽、色情、暴力、凶杀等内容的信息,一经发现立即删除。若您因此触犯法律,一切后果自负,我们对此不承担任何责任
5、所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其内容的准确性、可靠性、正当性、安全性、合法性等负责,亦不承担任何法律责任
6、所有作品仅供您个人学习、研究或欣赏,不得用于商业或者其他用途,否则,一切后果均由您自己承担,我们对此不承担任何法律责任
7、如涉及侵犯版权等问题,请您及时通知我们,我们将立即采取措施予以解决
8、联系人Email:admin@iyunv.com 网址:www.yunweiku.com

所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其承担任何法律责任,如涉及侵犯版权等问题,请您及时通知我们,我们将立即处理,联系人Email:kefu@iyunv.com,QQ:1061981298 本贴地址:https://www.yunweiku.com/thread-312346-1-1.html 上篇帖子: 用 Hadoop 进行分布式数据处理(入门) 下篇帖子: Plans for a 0.21 Hadoop Release
您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

扫码加入运维网微信交流群X

扫码加入运维网微信交流群

扫描二维码加入运维网微信交流群,最新一手资源尽在官方微信交流群!快快加入我们吧...

扫描微信二维码查看详情

客服E-mail:kefu@iyunv.com 客服QQ:1061981298


QQ群⑦:运维网交流群⑦ QQ群⑧:运维网交流群⑧ k8s群:运维网kubernetes交流群


提醒:禁止发布任何违反国家法律、法规的言论与图片等内容;本站内容均来自个人观点与网络等信息,非本站认同之观点.


本站大部分资源是网友从网上搜集分享而来,其版权均归原作者及其网站所有,我们尊重他人的合法权益,如有内容侵犯您的合法权益,请及时与我们联系进行核实删除!



合作伙伴: 青云cloud

快速回复 返回顶部 返回列表