Hands-on: a highly available cluster with hadoop 2.6.3 + zookeeper 3.4.6 + hbase 1.0.2
I. Pre-installation preparation
1. Environment: five servers
(The original screenshot listing the five hosts is lost; the hostnames and IP addresses are shown in the hosts file below.)

2. Edit the hosts file
[root@hadoop01 ~]# cat /etc/hosts
192.168.10.201 hadoop01
192.168.10.202 hadoop02
192.168.10.203 hadoop03
192.168.10.204 hadoop04
192.168.10.205 hadoop05
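To avoid editing the file five times, the finished file can be pushed from hadoop01 to the other nodes; a minimal sketch, assuming root SSH is reachable by IP (passwords are prompted until the trust in the next step is in place):
for ip in 192.168.10.202 192.168.10.203 192.168.10.204 192.168.10.205; do
  scp /etc/hosts root@$ip:/etc/hosts   # overwrite each node's hosts file with the master copy
done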
3. Passwordless SSH login
Run on every node:
[root@hadoop01 ~]# mkdir ~/.ssh
[root@hadoop01 ~]# chmod 700 ~/.ssh
[root@hadoop01 ~]# cd ~/.ssh/
[root@hadoop01 .ssh]# ssh-keygen -t rsa
Once all five nodes are done, assemble the public keys into one file:
[root@hadoop01 .ssh]# ssh hadoop02 cat /root/.ssh/id_rsa.pub >> authorized_keys
[root@hadoop01 .ssh]# ssh hadoop03 cat /root/.ssh/id_rsa.pub >> authorized_keys
[root@hadoop01 .ssh]# ssh hadoop04 cat /root/.ssh/id_rsa.pub >> authorized_keys
[root@hadoop01 .ssh]# ssh hadoop05 cat /root/.ssh/id_rsa.pub >> authorized_keys
[root@hadoop01 .ssh]# ssh hadoop01 cat /root/.ssh/id_rsa.pub >> authorized_keys
[root@hadoop01 .ssh]# chmod 600 authorized_keys
[root@hadoop01 .ssh]# scp authorized_keys hadoop02:/root/.ssh/
[root@hadoop01 .ssh]# scp authorized_keys hadoop03:/root/.ssh/
[root@hadoop01 .ssh]# scp authorized_keys hadoop04:/root/.ssh/
[root@hadoop01 .ssh]# scp authorized_keys hadoop05:/root/.ssh/
Test the SSH trust:
  [root@hadoop01 .ssh]# ssh hadoop02 date
  Mon Aug  8 11:07:23 CST 2016
  [root@hadoop01 .ssh]# ssh hadoop03 date
  Mon Aug  8 11:07:26 CST 2016
  [root@hadoop01 .ssh]# ssh hadoop04 date
  Mon Aug  8 11:07:29 CST 2016
  [root@hadoop01 .ssh]# ssh hadoop05 date
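As a tighter check, a loop such as the following sketch verifies the trust to all five nodes in one pass and should print five hostnames without any password prompt:
for h in hadoop01 hadoop02 hadoop03 hadoop04 hadoop05; do
  ssh $h hostname   # a password prompt here means authorized_keys is wrong on that node
done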
4. Time synchronization (all five nodes)
yum -y install ntpdate
[root@hadoop01 .ssh]# crontab -l
0 * * * * /usr/sbin/ntpdate 0.rhel.pool.ntp.org && /sbin/clock -w
Any other time-synchronization scheme works just as well.
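To confirm the cron entry does what you expect, the same command can be run once by hand; ntpdate prints the offset it corrected:
/usr/sbin/ntpdate 0.rhel.pool.ntp.org   # one-off sync against the same pool server
/sbin/clock -w                          # persist the corrected time to the hardware clock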
5. Raise the open-file and process limits (all five nodes)
  [root@hadoop01 ~]# vi /etc/security/limits.conf
  root soft nofile 65535
  root hard nofile 65535
  root soft nproc 32000
  root hard nproc 32000
  [root@hadoop01 ~]# vi /etc/pam.d/login
  session  required        pam_limits.so
Reboot the system after these changes.
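After the reboot, a quick way to confirm the limits took effect for the root session:
ulimit -n   # open files; should print 65535
ulimit -u   # max user processes; should print 32000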
II. Installing Hadoop + ZooKeeper HA
1. Install the JDK (all five nodes)
Unpack the JDK:
  [root@hadoop01 ~] cd /opt
  [root@hadoop01 opt]# tar zxvf jdk-7u21-linux-x64.tar.gz
  [root@hadoop01 opt]# mv jdk1.7.0_21 jdk
Add it to /etc/profile:
  [root@hadoop01 opt]# vi /etc/profile
  #java
  JAVA_HOME=/opt/jdk
  PATH=$JAVA_HOME/bin:$PATH
  CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
  export JAVA_HOME
  export PATH

export CLASSPATH
Apply the profile:
  [root@hadoop01 opt]# source /etc/profile
  [root@hadoop01 opt]# java -version
  java version "1.7.0_21"
  Java(TM) SE Runtime Environment (build 1.7.0_21-b11)
  Java HotSpot(TM) 64-Bit Server VM (build 23.21-b01, mixed mode)
The output above confirms the settings took effect.
2. Unpack Hadoop and set environment variables
  [root@hadoop01 ~]# tar zxvf hadoop-2.6.3.tar.gz
[root@hadoop01 ~]# mkdir /data
  [root@hadoop01 ~]# mv hadoop-2.6.3 /data/hadoop
  [root@hadoop01 data]# vi /etc/profile
  ##hadoop
  export HADOOP_HOME=/data/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
  [root@hadoop01 data]# source /etc/profile
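A quick sanity check that the Hadoop binaries now resolve from PATH:
hadoop version   # should report Hadoop 2.6.3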
3. Edit the Hadoop configuration files
  [root@hadoop01 data]# cd /data/hadoop/etc/hadoop/
  [root@hadoop01 hadoop]# vi slaves
  hadoop01
  hadoop02
  hadoop03
  hadoop04
  hadoop05
hadoop01 and hadoop02 are included as workers here to make use of their disk space; omit them if you prefer dedicated masters.
  [root@hadoop01 hadoop]# vi hadoop-env.sh
(Screenshot lost; this step sets export JAVA_HOME=/opt/jdk in hadoop-env.sh.)

  [root@hadoop01 hadoop]# vi yarn-env.sh
(Screenshot lost; this step likewise sets export JAVA_HOME=/opt/jdk in yarn-env.sh.)

Edit core-site.xml:
[root@hadoop01 hadoop]# vi core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://cluster</value>
    <description>The name of the default file system.</description>
    <final>true</final>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/data/hadoop/tmp</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>hadoop01:2190,hadoop02:2190,hadoop03:2190,hadoop04:2190,hadoop05:2190</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>2048</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <!-- the keys generated earlier with ssh-keygen -t rsa live at /root/.ssh/id_rsa -->
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
</configuration>
Edit hdfs-site.xml:
[root@hadoop01 hadoop]# vi hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>cluster</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.cluster</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.cluster.nn1</name>
    <value>hadoop01:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.cluster.nn2</name>
    <value>hadoop02:8020</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.cluster.nn1</name>
    <value>hadoop01:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.cluster.nn2</name>
    <value>hadoop02:50070</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address.cluster.nn1</name>
    <value>hadoop01:53333</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address.cluster.nn2</name>
    <value>hadoop02:53333</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://hadoop01:8485;hadoop02:8485;hadoop03:8485;hadoop04:8485;hadoop05:8485/cluster</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.cluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/data/hadoop/mydata/journal</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/data/hadoop/mydata/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/data/hadoop/mydata/data</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.journalnode.http-address</name>
    <value>0.0.0.0:8480</value>
  </property>
  <property>
    <name>dfs.journalnode.rpc-address</name>
    <value>0.0.0.0:8485</value>
  </property>
  <property>
    <name>dfs.permissions</name>
    <value>false</value>
  </property>
</configuration>
Edit mapred-site.xml:
[root@hadoop01 hadoop]# vi mapred-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.cluster.temp.dir</name>
    <value>/data/hadoop/mydata/mr_temp</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop01:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop01:19888</value>
  </property>
</configuration>
Edit yarn-site.xml (on hadoop01 yarn.resourcemanager.ha.id is rm1; it is changed to rm2 on hadoop02 later):
[root@hadoop01 hadoop]# vi yarn-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
  <property>
    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
    <value>60000</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>rm-cluster</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.ha.id</name>
    <value>rm1</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>hadoop01</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>hadoop02</value>
  </property>
  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>hadoop01:2190,hadoop02:2190,hadoop03:2190,hadoop04:2190,hadoop05:2190</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address.rm1</name>
    <value>${yarn.resourcemanager.hostname.rm1}:23140</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm1</name>
    <value>${yarn.resourcemanager.hostname.rm1}:23130</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.https.address.rm1</name>
    <value>${yarn.resourcemanager.hostname.rm1}:23189</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>${yarn.resourcemanager.hostname.rm1}:23188</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
    <value>${yarn.resourcemanager.hostname.rm1}:23125</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address.rm1</name>
    <value>${yarn.resourcemanager.hostname.rm1}:23141</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address.rm2</name>
    <value>${yarn.resourcemanager.hostname.rm2}:23140</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address.rm2</name>
    <value>${yarn.resourcemanager.hostname.rm2}:23130</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.https.address.rm2</name>
    <value>${yarn.resourcemanager.hostname.rm2}:23189</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>${yarn.resourcemanager.hostname.rm2}:23188</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
    <value>${yarn.resourcemanager.hostname.rm2}:23125</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address.rm2</name>
    <value>${yarn.resourcemanager.hostname.rm2}:23141</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
  </property>
  <property>
    <name>yarn.scheduler.fair.allocation.file</name>
    <value>${yarn.home.dir}/etc/hadoop/fairscheduler.xml</value>
  </property>
  <property>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/data/hadoop/mydata/yarn_local</value>
  </property>
  <property>
    <name>yarn.nodemanager.log-dirs</name>
    <value>/data/hadoop/mydata/yarn_log</value>
  </property>
  <property>
    <name>yarn.nodemanager.remote-app-log-dir</name>
    <value>/data/hadoop/mydata/yarn_remotelog</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.staging-dir</name>
    <value>/data/hadoop/mydata/yarn_userstag</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.intermediate-done-dir</name>
    <value>/data/hadoop/mydata/yarn_intermediatedone</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.done-dir</name>
    <value>/data/hadoop/mydata/yarn_done</value>
  </property>
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>2048</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>4.2</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>2</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <description>Classpath for typical applications.</description>
    <name>yarn.application.classpath</name>
    <value>
      $HADOOP_HOME/etc/hadoop,
      $HADOOP_HOME/share/hadoop/common/*,
      $HADOOP_HOME/share/hadoop/common/lib/*,
      $HADOOP_HOME/share/hadoop/hdfs/*,
      $HADOOP_HOME/share/hadoop/hdfs/lib/*,
      $HADOOP_HOME/share/hadoop/mapreduce/*,
      $HADOOP_HOME/share/hadoop/mapreduce/lib/*,
      $HADOOP_HOME/share/hadoop/yarn/*,
      $HADOOP_HOME/share/hadoop/yarn/lib/*
    </value>
  </property>
</configuration>
Edit fairscheduler.xml. The queue names were lost from the original post; queue1/queue2/queue3 below are placeholders, so substitute your own:
[root@hadoop01 hadoop]# vi fairscheduler.xml
<?xml version="1.0"?>
<allocations>
  <queue name="queue1">
    <minResources>1024 mb, 1 vcores</minResources>
    <maxResources>1536 mb, 1 vcores</maxResources>
    <maxRunningApps>5</maxRunningApps>
    <minSharePreemptionTimeout>300</minSharePreemptionTimeout>
    <weight>1.0</weight>
    <aclSubmitApps>root,yarn,search,hdfs</aclSubmitApps>
  </queue>
  <queue name="queue2">
    <minResources>1024 mb, 1 vcores</minResources>
    <maxResources>1536 mb, 1 vcores</maxResources>
  </queue>
  <queue name="queue3">
    <minResources>1024 mb, 1 vcores</minResources>
    <maxResources>1536 mb, 1 vcores</maxResources>
  </queue>
</allocations>
  
Create the base directory used by the paths in the configs above:
  mkdir -p /data/hadoop/mydata/yarn
4. Unpack ZooKeeper and set environment variables
  [root@hadoop01 ~]# tar zxvf zookeeper-3.4.6.tar.gz
[root@hadoop01 ~]# mv zookeeper-3.4.6 /data/zookeeper
  [root@hadoop01 ~]# vi /etc/profile
  ##zookeeper
  export ZOOKEEPER_HOME=/data/zookeeper
  export PATH=$PATH:$ZOOKEEPER_HOME/bin:$ZOOKEEPER_HOME/conf
  [root@hadoop01 ~]# source /etc/profile
5. Edit the ZooKeeper configuration
  [root@hadoop01 ~]# cd /data/zookeeper/conf/
  [root@hadoop01 conf]# cp zoo_sample.cfg zoo.cfg
  [root@hadoop01 conf]# vi zoo.cfg
  # The number of milliseconds of each tick
  tickTime=2000
  # The number of ticks that the initial
  # synchronization phase can take
  initLimit=10
  # The number of ticks that can pass between
  # sending a request and getting an acknowledgement
  syncLimit=5
  # the directory where the snapshot is stored.
  # do not use /tmp for storage, /tmp here is just
  # example sakes.
  dataDir=/data/hadoop/mydata/zookeeper
  dataLogDir=/data/hadoop/mydata/zookeeperlog
  # the port at which the clients will connect
  clientPort=2190
  server.1=hadoop01:2888:3888
  server.2=hadoop02:2888:3888
  server.3=hadoop03:2888:3888
  server.4=hadoop04:2888:3888
  server.5=hadoop05:2888:3888
  # the maximum number of client connections.
  # increase this if you need to handle more clients
  #maxClientCnxns=60
  #
  # Be sure to read the maintenance section of the
  # administrator guide before turning on autopurge.
  #
  # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
  #
  # The number of snapshots to retain in dataDir
  #autopurge.snapRetainCount=3
  # Purge task interval in hours
  # Set to "0" to disable auto purge feature
  #autopurge.purgeInterval=1
Create the data directories:
  mkdir /data/hadoop/mydata/zookeeper
  mkdir /data/hadoop/mydata/zookeeperlog
6. Copy the configured hadoop and zookeeper directories to the other four nodes
  [root@hadoop01 ~]# scp -r /data/hadoop hadoop02:/data/
  [root@hadoop01 ~]# scp -r /data/hadoop hadoop03:/data/
  [root@hadoop01 ~]# scp -r /data/hadoop hadoop04:/data/
  [root@hadoop01 ~]# scp -r /data/hadoop hadoop05:/data/
  [root@hadoop01 ~]# scp -r /data/zookeeper hadoop02:/data/
  [root@hadoop01 ~]# scp -r /data/zookeeper hadoop03:/data/
  [root@hadoop01 ~]# scp -r /data/zookeeper hadoop04:/data/
  [root@hadoop01 ~]# scp -r /data/zookeeper hadoop05:/data/
On hadoop02, edit yarn-site.xml and change rm1 to rm2:
[root@hadoop02 hadoop]# cd /data/hadoop/etc/hadoop/
[root@hadoop02 hadoop]# vi yarn-site.xml
<name>yarn.resourcemanager.ha.id</name>
<value>rm2</value>
Then set each server's myid to match its server.N line in zoo.cfg:
  [root@hadoop01 ~]# vi /data/hadoop/mydata/zookeeper/myid
  1
  [root@hadoop02 ~]# vi /data/hadoop/mydata/zookeeper/myid
  2
  [root@hadoop03 ~]# vi /data/hadoop/mydata/zookeeper/myid
  3
  [root@hadoop04 ~]# vi /data/hadoop/mydata/zookeeper/myid
  4
  [root@hadoop05 ~]# vi /data/hadoop/mydata/zookeeper/myid
  5
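Equivalently, the five myid files can be written in one loop from hadoop01, using the SSH trust set up earlier (a sketch):
i=1
for h in hadoop01 hadoop02 hadoop03 hadoop04 hadoop05; do
  ssh $h "echo $i > /data/hadoop/mydata/zookeeper/myid"   # myid must match server.N in zoo.cfg
  i=$((i+1))
done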
7. Start ZooKeeper
Run zkServer.sh start on all five nodes:
  [root@hadoop01 ~]# zkServer.sh start
  [root@hadoop01 ~]# zkServer.sh status
  ZooKeeper JMX enabled by default
  Using config: /data/zookeeper/bin/../conf/zoo.cfg
  Mode: follower
  [root@hadoop03 ~]# zkServer.sh status
  ZooKeeper JMX enabled by default
  Using config: /data/zookeeper/bin/../conf/zoo.cfg
  Mode: leader
Normally exactly one server reports leader; the rest are followers.
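ZooKeeper's four-letter-word commands give the same information over the wire; assuming nc (netcat) is installed, each server can be queried on the client port:
echo stat | nc hadoop01 2190   # prints the Mode (leader/follower) plus connection and zxid stats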
8. Format the ZooKeeper HA state
On hadoop01, run:
  [root@hadoop01 ~]# hdfs zkfc -formatZK
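This creates the /hadoop-ha/cluster znode used by the failover controllers; it can be confirmed from the ZooKeeper CLI:
zkCli.sh -server hadoop01:2190
ls /hadoop-ha        # should list [cluster]
quit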
9. Start the JournalNode processes
Start on every node (all five):
  [root@hadoop01 ~]# cd /data/hadoop/sbin/
  [root@hadoop01 sbin]# ./hadoop-daemon.sh start journalnode
10. Format the NameNode
On hadoop01, run:
  [root@hadoop01 ~]# hdfs namenode -format
11. Start the NameNode
On hadoop01, run:
  [root@hadoop01 ~]# cd /data/hadoop/sbin/
[root@hadoop01 sbin]# ./hadoop-daemon.sh start namenode
12. Sync the freshly formatted NameNode metadata to the standby NameNode
On hadoop02 (the standby), run:
[root@hadoop02 ~]# hdfs namenode -bootstrapStandby
13. Start the NameNode on hadoop02
  [root@hadoop02 ~]# cd /data/hadoop/sbin/
[root@hadoop02 sbin]# ./hadoop-daemon.sh start namenode
14. Start all DataNodes
Run on every node listed in slaves (all five):
  [root@hadoop01 ~]# cd /data/hadoop/sbin/
[root@hadoop01 sbin]# ./hadoop-daemon.sh start datanode
15. Start YARN
On hadoop01, run:
[root@hadoop01 ~]# cd /data/hadoop/sbin/
  [root@hadoop01 sbin]# ./start-yarn.sh
16. Start the ZKFCs
On both hadoop01 and hadoop02:
  [root@hadoop01 ~]# cd /data/hadoop/sbin/
  [root@hadoop01 sbin]# ./hadoop-daemon.sh start zkfc
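With everything up, the HA roles can be checked from the command line before looking at the web UIs; both admin tools ship with Hadoop:
hdfs haadmin -getServiceState nn1    # one of active/standby
hdfs haadmin -getServiceState nn2    # the other state
yarn rmadmin -getServiceState rm1    # ResourceManager HA role
yarn rmadmin -getServiceState rm2
A crude failover test is to kill -9 the active NameNode process and re-run the haadmin commands: the standby should be promoted to active within seconds.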
17. Startup result
(Screenshots of the successful startup are lost from the original post.)

III. Installing HBase HA
1. Unpack HBase and edit its configuration
  [root@hadoop01 ~]# tar zxvf hbase-1.0.2-bin.tar.gz
  [root@hadoop01 ~]# mv hbase-1.0.2 /data/hbase
Set environment variables:
  [root@hadoop01 ~]# vi /etc/profile
  ##hbase
  export HBASE_HOME=/data/hbase
  export PATH=$PATH:$HBASE_HOME/bin
  [root@hadoop01 ~]# source /etc/profile
  [root@hadoop01 ~]# cd /data/hbase/conf/
  [root@hadoop01 conf]# vi hbase-env.sh
# The java implementation to use.  Java 1.7+ required.
export JAVA_HOME="/opt/jdk"
# Extra Java CLASSPATH elements. This must point at the Hadoop conf dir,
# otherwise HMaster will not start.
export HBASE_CLASSPATH=/data/hadoop/etc/hadoop
# Where log files are stored.  $HBASE_HOME/logs by default.
export HBASE_LOG_DIR=/data/hbase/logs
# Tell HBase whether it should manage its own instance of ZooKeeper or not.
export HBASE_MANAGES_ZK=false
Edit hbase-site.xml:
[root@hadoop01 conf]# vi hbase-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://cluster/hbase</value>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.tmp.dir</name>
    <value>/data/hbase/tmp</value>
  </property>
  <property>
    <name>hbase.master.port</name>
    <value>60000</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/data/hadoop/mydata/zookeeper</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>hadoop01,hadoop02,hadoop03,hadoop04,hadoop05</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2190</value>
  </property>
  <property>
    <name>zookeeper.session.timeout</name>
    <value>120000</value>
  </property>
  <property>
    <name>hbase.regionserver.restart.on.zk.expire</name>
    <value>true</value>
  </property>
</configuration>
  [root@hadoop01 conf]# vi regionservers
  hadoop01
  hadoop02
  hadoop03
  hadoop04
  hadoop05
Create the tmp directory:
mkdir /data/hbase/tmp
Add a backup master:
  [root@hadoop01 conf]# vi backup-masters
  hadoop02
The configuration is now complete.
2. Copy HBase to the other servers
  [root@hadoop01 conf]# scp -r /data/hbase hadoop02:/data/
  [root@hadoop01 conf]# scp -r /data/hbase hadoop03:/data/
  [root@hadoop01 conf]# scp -r /data/hbase hadoop04:/data/
  [root@hadoop01 conf]# scp -r /data/hbase hadoop05:/data/
3. Start HBase
On hadoop01, run:
  [root@hadoop01 conf]# start-hbase.sh
4. Startup result
(Screenshots of the startup result are lost from the original post.)
Check the processes with jps:
  [root@hadoop01 conf]# jps
  2540 NodeManager
  1686 QuorumPeerMain
  2134 JournalNode
  2342 DFSZKFailoverController
  3041 HMaster
  1933 DataNode
  3189 HRegionServer
  2438 ResourceManager
  7848 Jps
  1827 NameNode
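A short smoke test from the HBase shell confirms the cluster is serving requests; t1 is just an example table name:
hbase shell
status                                 # expect 1 active master, 1 backup master, 5 servers
create 't1', 'cf'
put 't1', 'row1', 'cf:a', 'value1'
scan 't1'
disable 't1'
drop 't1'
exit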
Routine startup procedure
Run on every node (all five):
  [root@hadoop01 ~]# zkServer.sh start
Then on hadoop01:
  [root@hadoop01 ~]# cd /data/hadoop/sbin/
  [root@hadoop01 sbin]# ./start-dfs.sh
  [root@hadoop01 sbin]# ./start-yarn.sh
Finally, start HBase:
  [root@hadoop01 sbin]# start-hbase.sh
Shutdown procedure
Stop HBase first:
  stop-hbase.sh
Then on hadoop01:
  [root@hadoop01 ~]# cd /data/hadoop/sbin/
  [root@hadoop01 sbin]# ./stop-yarn.sh
  [root@hadoop01 sbin]# ./stop-dfs.sh
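The whole sequence can be wrapped in a small script on hadoop01; a sketch that assumes the SSH trust and the paths used throughout this post (the stop script is the same steps in reverse order):
#!/bin/bash
# start-cluster.sh - bring the stack up in dependency order
for h in hadoop01 hadoop02 hadoop03 hadoop04 hadoop05; do
  ssh $h '/data/zookeeper/bin/zkServer.sh start'   # ZooKeeper first
done
/data/hadoop/sbin/start-dfs.sh     # HDFS: namenodes, datanodes, journalnodes, zkfc
/data/hadoop/sbin/start-yarn.sh    # YARN resource/node managers
/data/hbase/bin/start-hbase.sh     # HBase last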

