
A Detailed Guide to Building a Big Data Learning Environment (CentOS 6.9 + Hadoop 2.7.3 + Hive 1.2.1 + HBase 1.3.1 + Spark 2.1.1)

2023-06-13 09:20:28

I once built a full setup with HDFS Federation; it needed at least four machines, which was overly complex and more than my laptop could handle. Now, in order to study Spark 2.0, I have dropped Federation and simplified the learning environment, while still keeping it fully distributed.
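The cluster below consists of three machines named node1 through node3. For reference, a minimal /etc/hosts mapping consistent with those names might look like this (the IP addresses are placeholders; substitute your own):

192.168.1.101   node1
192.168.1.102   node2
192.168.1.103   node3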


[root@node1 ~]# cp /root/zookeeper-3.4.9/conf/zoo_sample.cfg /root/zookeeper-3.4.9/conf/zoo.cfg
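For a three-node ensemble, zoo.cfg must list all ZooKeeper servers; a minimal sketch (dataDir is an assumption, the ports are the conventional defaults):

tickTime=2000
initLimit=10
syncLimit=5
dataDir=/root/zookeeper-3.4.9/data
clientPort=2181
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888

Each node also needs a matching myid file under dataDir, e.g. echo 1 > /root/zookeeper-3.4.9/data/myid on node1 (2 on node2, 3 on node3).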

[root@node1 ~]# vi /root/zookeeper-3.4.9/conf/log4j.properties

export PATH=.:$PATH:$JAVA_HOME/bin:$SCALA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:/root:$HIVE_HOME/bin:$HBASE_HOME/bin:$SPARK_HOME/bin
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
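These exports belong in /etc/profile on every node; after editing, reload the profile and spot-check that the tools resolve:

[root@node1 ~]# source /etc/profile
[root@node1 ~]# java -version
[root@node1 ~]# hadoop version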
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
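That value belongs to the client failover proxy setting in hdfs-site.xml. The full property looks like the following, where mycluster stands in for the nameservice ID (an assumption; use whatever ID your dfs.nameservices defines):

<property>
  <name>dfs.client.failover.proxy.provider.mycluster</name>
  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>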
[root@node1 ~]# file /root/hadoop-2.7.3/lib/native/libhadoop.so.1.0.0
libhadoop.so.1.0.0: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, not stripped
[root@node1 ~]# /root/hadoop-2.7.3/sbin/hadoop-daemon.sh start journalnode

[root@node2 ~]# /root/hadoop-2.7.3/sbin/hadoop-daemon.sh start journalnode
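A JournalNode quorum needs an odd number of members, normally three, and on a brand-new cluster the active NameNode must be formatted before its first start. A sketch of the remaining bootstrap steps, assuming (per the zkfc entries in the start script at the end) that the NameNodes live on node2 and node3 and the third JournalNode on node3:

[root@node3 ~]# /root/hadoop-2.7.3/sbin/hadoop-daemon.sh start journalnode
[root@node2 ~]# /root/hadoop-2.7.3/bin/hdfs namenode -format
[root@node2 ~]# /root/hadoop-2.7.3/bin/hdfs zkfc -formatZK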

[root@node2 ~]# /root/hadoop-2.7.3/sbin/hadoop-daemon.sh start namenode

[root@node3 ~]# /root/hadoop-2.7.3/bin/hdfs namenode -bootstrapStandby

[root@node3 ~]# /root/hadoop-2.7.3/sbin/hadoop-daemon.sh start namenode

[root@node1 ~]# /root/hadoop-2.7.3/sbin/hadoop-daemon.sh start datanode

[root@node2 ~]# /root/hadoop-2.7.3/sbin/hadoop-daemon.sh start datanode

[root@node3 ~]# /root/hadoop-2.7.3/sbin/hadoop-daemon.sh start datanode
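With HDFS up, jps is a quick sanity check on each machine; on a NameNode host the output should resemble this sketch (PIDs will differ):

[root@node2 ~]# jps
3010 QuorumPeerMain
3102 JournalNode
3201 NameNode
3345 DataNode
3567 Jps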

[root@node1 ~]# /root/hadoop-2.7.3/sbin/yarn-daemon.sh start resourcemanager

[root@node2 ~]# /root/hadoop-2.7.3/sbin/yarn-daemon.sh start resourcemanager
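The HA state of the two ResourceManagers can be queried directly; rm1 is the ID used later in the start script, and rm2 is assumed to be the second ID from yarn-site.xml:

[root@node1 ~]# /root/hadoop-2.7.3/bin/yarn rmadmin -getServiceState rm1
[root@node1 ~]# /root/hadoop-2.7.3/bin/yarn rmadmin -getServiceState rm2

Each command prints active or standby.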

[root@node1 ~]# /root/hadoop-2.7.3/sbin/yarn-daemon.sh start nodemanager

[root@node2 ~]# /root/hadoop-2.7.3/sbin/yarn-daemon.sh start nodemanager

[root@node3 ~]# /root/hadoop-2.7.3/sbin/yarn-daemon.sh start nodemanager
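Once all three NodeManagers have registered, the active ResourceManager should report them:

[root@node1 ~]# /root/hadoop-2.7.3/bin/yarn node -list

All three nodes should appear with state RUNNING.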

[root@node1 ~]# mysql -h localhost -u root -p

Enter password: AAAaaa111

mysql> GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'AAAaaa111' WITH GRANT OPTION;
mysql> FLUSH PRIVILEGES;
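This grant is what allows the Hive metastore to reach MySQL from any host. A quick check from another node (password as set above):

[root@node2 ~]# mysql -h node1 -u root -p
Enter password: AAAaaa111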


Since the Hive bundled into the official spark-2.1.1-bin-hadoop2.7.tgz package is 1.2.1, Hive 1.2.1 is the version chosen here.
[root@node1 ~]# cp /root/apache-hive-1.2.1-bin/conf/hive-env.sh.template /root/apache-hive-1.2.1-bin/conf/hive-env.sh

[root@node1 ~]# cp /root/apache-hive-1.2.1-bin/conf/hive-log4j.properties.template /root/apache-hive-1.2.1-bin/conf/hive-log4j.properties

<value>jdbc:mysql://node1:3306/hive?createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8</value>
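That JDBC URL goes into hive-site.xml together with the driver class and the credentials created above; a minimal sketch using the standard Hive property names:

<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://node1:3306/hive?createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>com.mysql.jdbc.Driver</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>root</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>AAAaaa111</value>
</property>

The MySQL Connector/J jar must also be copied into /root/apache-hive-1.2.1-bin/lib, or the metastore cannot load the driver.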
     
/root/apache-hive-1.2.1-bin/bin/hive --service metastore > /dev/null 2>&1 &

[root@node1 ~]# ln -s /etc/init.d/hive-metastore /etc/rc.d/rc3.d/S65hive-metastore
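The symlink assumes an /etc/init.d/hive-metastore script; a minimal hand-rolled sketch wrapping the launch command above (hypothetical, not shipped with Hive):

#!/bin/bash
# /etc/init.d/hive-metastore : start/stop wrapper for the Hive metastore
source /etc/profile
case "$1" in
  start)
    /root/apache-hive-1.2.1-bin/bin/hive --service metastore > /dev/null 2>&1 &
    ;;
  stop)
    pkill -f org.apache.hadoop.hive.metastore.HiveMetaStore
    ;;
esac

Remember to make it executable: chmod +x /etc/init.d/hive-metastore.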

[root@node1 ~]# cp /root/hadoop-2.7.3/etc/hadoop/hdfs-site.xml /root/hadoop-2.7.3/etc/hadoop/core-site.xml /root/hbase-1.3.1/conf/
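Copying hdfs-site.xml and core-site.xml into the HBase conf directory lets HBase resolve the HA nameservice, so hbase-site.xml only has to point at it; a sketch reusing the assumed mycluster nameservice and the ZooKeeper ensemble:

<property>
  <name>hbase.rootdir</name>
  <value>hdfs://mycluster/hbase</value>
</property>
<property>
  <name>hbase.cluster.distributed</name>
  <value>true</value>
</property>
<property>
  <name>hbase.zookeeper.quorum</name>
  <value>node1,node2,node3</value>
</property>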

#export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
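(On JDK 8 and later the PermSize/MaxPermSize options are obsolete, since PermGen was removed, which is presumably why the line stays commented out.)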
[root@node1 ~]# cp /root/spark-2.1.1-bin-hadoop2.7/conf/spark-env.sh.template /root/spark-2.1.1-bin-hadoop2.7/conf/spark-env.sh

[root@node1 ~]# vi /root/spark-2.1.1-bin-hadoop2.7/conf/spark-env.sh

export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=node1:2181,node2:2181,node3:2181 -Dspark.deploy.zookeeper.dir=/spark"
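Besides the ZooKeeper recovery options, spark-env.sh usually also points Spark at the Hadoop configuration so that spark-submit can find HDFS and YARN; a plausible addition for this layout:

export HADOOP_CONF_DIR=/root/hadoop-2.7.3/etc/hadoop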
[root@node1 ~]# cp /root/spark-2.1.1-bin-hadoop2.7/conf/slaves.template /root/spark-2.1.1-bin-hadoop2.7/conf/slaves
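The slaves file lists the Spark Worker hosts, one per line; assuming Workers on all three machines:

node1
node2
node3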

Finally, excerpts from the cluster start and stop helper scripts; each remote command is quoted so that the whole command line runs on the target node:

ssh root@node2 "export BASH_ENV=/etc/profile;/root/zookeeper-3.4.9/bin/zkServer.sh start"
ssh root@node3 "export BASH_ENV=/etc/profile;/root/zookeeper-3.4.9/bin/zkServer.sh start"
#ssh root@node2 "export BASH_ENV=/etc/profile;/root/hadoop-2.7.3/sbin/yarn-daemon.sh start resourcemanager"
ssh root@node2 "export BASH_ENV=/etc/profile;/root/hadoop-2.7.3/sbin/hadoop-daemon.sh start zkfc"
ssh root@node3 "export BASH_ENV=/etc/profile;/root/hadoop-2.7.3/sbin/hadoop-daemon.sh start zkfc"
echo Y | ssh root@node1 "export BASH_ENV=/etc/profile;/root/hadoop-2.7.3/bin/yarn rmadmin -transitionToActive --forcemanual rm1"
#ssh root@node2 "export BASH_ENV=/etc/profile;/root/hbase-1.3.1/bin/hbase-daemon.sh start master"
#ssh root@node2 "export BASH_ENV=/etc/profile;/root/spark-2.1.1-bin-hadoop2.7/sbin/start-master.sh"
#ssh root@node2 "export BASH_ENV=/etc/profile;/root/hadoop-2.7.3/sbin/yarn-daemon.sh stop resourcemanager"
ssh root@node2 "export BASH_ENV=/etc/profile;/root/hadoop-2.7.3/sbin/hadoop-daemon.sh stop zkfc"
ssh root@node2 "export BASH_ENV=/etc/profile;/root/zookeeper-3.4.9/bin/zkServer.sh stop"
ssh root@node3 "export BASH_ENV=/etc/profile;/root/zookeeper-3.4.9/bin/zkServer.sh stop"
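After the start script has run, HA state can be verified end to end; nn1 and nn2 are assumed NameNode IDs, substitute the ones defined in hdfs-site.xml:

[root@node1 ~]# /root/hadoop-2.7.3/bin/hdfs haadmin -getServiceState nn1
active
[root@node1 ~]# /root/hadoop-2.7.3/bin/hdfs haadmin -getServiceState nn2
standby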