hadoop 2.X HA详细配置
发布时间：2023-09-14 09:00:23
hdfs namenode -initializeSharedEdits    # 将本地edits log文件复制到journalnode节点上；首次执行需在格式化namenode之后进行
访问 http://hadoop-yarn1:8480 查看journalnode是否正常运行
2.格式化namenode,并启动Active Namenode
一、Active NameNode节点上格式化namenode
hdfs namenode -format
hdfs namenode -initializeSharedEdits
初始化journalnode完毕
二、启动Active Namenode
hadoop-daemon.sh start namenode
3.启动 Standby namenode
一、Standby namenode节点上格式化Standby节点
复制Active Namenode上的元数据信息拷贝到Standby Namenode节点上
hdfs namenode -bootstrapStandby
二、启动Standby节点
hadoop-daemon.sh start namenode
4.启动Automatic Failover
在zookeeper上创建 /hadoop-ha/ns1这样一个监控节点(ZNode)
hdfs zkfc -formatZK
start-dfs.sh
5.查看namenode状态
hdfs haadmin -getServiceState nn1    # 输出该namenode的当前状态（active 或 standby）
6.手动failover（注：自动failover由ZKFC监控并触发，无需人工干预；以下命令用于手动切换主备）
hdfs haadmin -failover nn1 nn2
配置文件详细信息
core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://ns1</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/modules/hadoop-2.2.0/data/tmp</value>
  </property>
  <property>
    <name>fs.trash.interval</name>
    <!-- 注意：该值必须是整数（单位为分钟），不能写算式 60*24，应写成 1440 -->
    <value>1440</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>hadoop-yarn1:2181,hadoop-yarn2:2181,hadoop-yarn3:2181</value>
  </property>
  <property>
    <name>hadoop.http.staticuser.user</name>
    <value>yuanhai</value>
  </property>
</configuration>
hdfs-site.xml
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.nameservices</name>
    <value>ns1</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.ns1</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.ns1.nn1</name>
    <value>hadoop-yarn1:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.ns1.nn2</name>
    <value>hadoop-yarn2:8020</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.ns1.nn1</name>
    <value>hadoop-yarn1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.ns1.nn2</name>
    <value>hadoop-yarn2:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://hadoop-yarn1:8485;hadoop-yarn2:8485;hadoop-yarn3:8485/ns1</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/opt/modules/hadoop-2.2.0/data/tmp/journal</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.ns1</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/hadoop/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>
  <!-- 以下为非HA模式下的备用配置，HA模式下无需启用
  <property>
    <name>dfs.namenode.http-address</name>
    <value>hadoop-yarn.dragon.org:50070</value>
  </property>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>hadoop-yarn.dragon.org:50090</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file://${hadoop.tmp.dir}/dfs/name</value>
  </property>
  <property>
    <name>dfs.namenode.edits.dir</name>
    <value>${dfs.namenode.name.dir}</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file://${hadoop.tmp.dir}/dfs/data</value>
  </property>
  <property>
    <name>dfs.namenode.checkpoint.dir</name>
    <value>file://${hadoop.tmp.dir}/dfs/namesecondary</value>
  </property>
  <property>
    <name>dfs.namenode.checkpoint.edits.dir</name>
    <value>${dfs.namenode.checkpoint.dir}</value>
  </property>
  -->
</configuration>
slaves
hadoop-yarn1
hadoop-yarn2
hadoop-yarn3
yarn-site.xml
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop-yarn1</value>
  </property>
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
  </property>
</configuration>
mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop-yarn1:10020</value>
    <description>MapReduce JobHistory Server IPC host:port</description>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop-yarn1:19888</value>
    <description>MapReduce JobHistory Server Web UI host:port</description>
  </property>
  <property>
    <name>mapreduce.job.ubertask.enable</name>
    <value>true</value>
  </property>
</configuration>
hadoop-env.sh
export JAVA_HOME=/opt/modules/jdk1.6.0_24
其他相关文章:
http://blog.csdn.net/zhangzhaokun/article/details/17892857
本文出自 “点滴积累” 博客,请务必保留此出处http://tianxingzhe.blog.51cto.com/3390077/1711811
好程序员大数据教程Hadoop全分布安装(非HA) 机器名称 启动服务 linux11 namenode secondrynamenode datanode linux12 datanode linux13 datanode 第一步:更改主机名,临时修改+永久修改 临时修改:hostname linux11 永久修改: vi /e.
搭建Hadoop的HA高可用架构(超详细步骤+已验证) 一、集群的规划 Zookeeper集群: 192.168.182.12 (bigdata12)192.168.182.13 (bigdata13)192.168.182.14 (bigdata14) Hadoop集群:
Hadoop手把手逐级搭建(4) Hadoop高可用+联邦+视图文件系统(HA+Federation+ViewFs) 步骤概述 1). 为高可用保存hadoop配置 2). 增加federation配置 3). 首次启动HA+Federation集群part1:启动journalnode和zookeeper,格式化zookeeper集群
相关文章
- hadoop配置在线刷新
- Hadoop集群-NTP时钟同步配置
- Hadoop-2.2.0集群安装配置实践
- 【Big Data】HADOOP集群的配置(一)
- Elasticsearch 7.x生产配置
- Linux Tomcat 6.0安装配置实践总结
- Ubuntu下 Hadoop 1.2.1 配置安装 - 大T的专栏 - 博客频道 - CSDN.NE
- Ubuntu下 Hadoop 1.2.1 配置安装 - 大T的专栏 - 博客频道 - CSDN.NE
- 安装配置plsql连接远端oracle数据库
- Spring异常解决 java.lang.NullPointerException,配置spring管理hibernate时出错
- Data - Hadoop单机配置 - 使用Hadoop2.8.0和Ubuntu16.04
- 【Big Data】HADOOP集群的配置(二)
- Hadoop 2.5.2分布式集群配置
- 什么是Nacos?Nacos注册配置中心介绍
- manjaro yay配置--aururl以提升下载速度
- Atitit Hadoop的MapReduce的执行过程、数据流的一点理解。 目录 1. Why 为什么使用hadoop1 2. Hadoop的MapReduce的执行过程1 2.1. Had
- 如何下载Visual Studio Code及配置教程
- hadoop - 安装详解【包括jdk配置】
- 【Android Gradle 插件】Splits 配置 ③ ( Splits#density{} 脚本块配置 | 根据屏幕像素密度进行分包 | DensitySplitOptions 配置简介 )
- 10.3 hadoop地址配置、内存配置、守护进程设置、环境设置
- hadoop(六) - ZooKeeper安装与配置
- Hadoop MapReduce执行过程详解(带hadoop例子)
- 玩转华为数据中心交换机系列 | 配置动态路由接入M-LAG
- 11 openEuler基础配置-设置磁盘调度算法
- CentOS 7.2.1511 配置SSH登陆免密
- hadoop权威指南 chapter1 Meet Hadoop
- Hadoop之hadoop fs命令
- 遇到问题之-centos安装配置hadoop超详细过程(含故障排除)
- Hadoop(25):Yarn核心参数配置案例