Hadoop High Availability (HA) Setup

This walkthrough configures an HA Hadoop 2.10.1 cluster across three nodes (hadoop-zxkf-1/2/3): NameNodes nn1 and nn2 run on hosts 1 and 3, ResourceManagers rm1 and rm2 on hosts 2 and 3, and JournalNodes plus a ZooKeeper quorum on all three. Three configuration files are involved: core-site.xml, hdfs-site.xml, and yarn-site.xml.

core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
        <!-- Default filesystem URI: points at the HA nameservice, not a single NameNode -->
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://ostream-hdfs</value>
        </property>
        <!-- Base directory for Hadoop's working/data files -->
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/home/hadoop/opt/hadoop-2.10.1/data/tmp</value>
        </property>
        <!-- Minutes before deleted files are purged from the trash -->
        <property>
                <name>fs.trash.interval</name>
                <value>2</value>
        </property>
        <!-- Minutes between trash checkpoints; must not exceed fs.trash.interval -->
        <property>
                <name>fs.trash.checkpoint.interval</name>
                <value>1</value>
        </property>
        <!-- User shown as owner when browsing HDFS via the web UI -->
        <property>
                <name>hadoop.http.staticuser.user</name>
                <value>hadoop</value>
        </property>
        <!-- ZooKeeper quorum used by the ZKFCs for automatic failover -->
        <property>
                <name>ha.zookeeper.quorum</name>
                <value>hadoop-zxkf-1:2181,hadoop-zxkf-2:2181,hadoop-zxkf-3:2181</value>
        </property>
</configuration>
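
Once core-site.xml is distributed, a quick sanity check (a minimal sketch; assumes the hdfs command is on the PATH and picks up this config) confirms that clients resolve the HA nameservice rather than a single host:

# Should print hdfs://ostream-hdfs (the nameservice ID), not a host:port
hdfs getconf -confKey fs.defaultFS
# Should print the three-node ZooKeeper quorum
hdfs getconf -confKey ha.zookeeper.quorum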

hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
        <!-- Logical name (nameservice ID) of the HA cluster -->
        <property>
                <name>dfs.nameservices</name>
                <value>ostream-hdfs</value>
        </property>
        <!-- The NameNodes that make up the nameservice -->
        <property>
                <name>dfs.ha.namenodes.ostream-hdfs</name>
                <value>nn1,nn2</value>
        </property>
        <!-- RPC address of nn1 -->
        <property>
                <name>dfs.namenode.rpc-address.ostream-hdfs.nn1</name>
                <value>hadoop-zxkf-1:8020</value>
        </property>
        <!-- RPC address of nn2 -->
        <property>
                <name>dfs.namenode.rpc-address.ostream-hdfs.nn2</name>
                <value>hadoop-zxkf-3:8020</value>
        </property>
        <!-- HTTP address of nn1 -->
        <property>
                <name>dfs.namenode.http-address.ostream-hdfs.nn1</name>
                <value>hadoop-zxkf-1:50070</value>
        </property>

        <!-- HTTP address of nn2 -->
        <property>
                <name>dfs.namenode.http-address.ostream-hdfs.nn2</name>
                <value>hadoop-zxkf-3:50070</value>
        </property>
        <!-- Shared edits directory: where NameNode edit logs live on the JournalNode quorum -->
        <property>
                <name>dfs.namenode.shared.edits.dir</name>
                <value>qjournal://hadoop-zxkf-1:8485;hadoop-zxkf-2:8485;hadoop-zxkf-3:8485/ostream-hdfs</value>
        </property>
        <!-- Fencing method, so that only one NameNode serves clients at any moment -->
        <property>
                <name>dfs.ha.fencing.methods</name>
                <value>sshfence</value>
        </property>
        <!-- sshfence requires passwordless SSH; key used to reach the other NameNode -->
        <property>
                <name>dfs.ha.fencing.ssh.private-key-files</name>
                <value>/home/hadoop/.ssh/id_rsa</value>
        </property>
        <!-- Local storage directory for each JournalNode -->
        <property>
                <name>dfs.journalnode.edits.dir</name>
                <value>/home/hadoop/opt/hadoop-2.10.1/data/jn</value>
        </property>
        <!-- Disable HDFS permission checking -->
        <property>
                <name>dfs.permissions.enabled</name>
                <value>false</value>
        </property>
        <!-- Failover proxy provider: how HDFS clients determine which NameNode is active -->
        <property>
                <name>dfs.client.failover.proxy.provider.ostream-hdfs</name>
                <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>
        <!-- Enable automatic failover via the ZKFCs -->
        <property>
                <name>dfs.ha.automatic-failover.enabled</name>
                <value>true</value>
        </property>
</configuration>
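
With hdfs-site.xml in place on all nodes, the first-time startup of a Hadoop 2.x HA cluster typically follows the sequence below (a sketch assuming the hadoop-2.10.1 layout above, with $HADOOP_HOME set to /home/hadoop/opt/hadoop-2.10.1 and ZooKeeper already running):

# 1. On all three nodes: start the JournalNodes first
$HADOOP_HOME/sbin/hadoop-daemon.sh start journalnode

# 2. On nn1 (hadoop-zxkf-1): format HDFS and start the NameNode
hdfs namenode -format
$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode

# 3. On nn2 (hadoop-zxkf-3): copy nn1's metadata instead of formatting again
hdfs namenode -bootstrapStandby

# 4. On nn1: create the failover znode in ZooKeeper
hdfs zkfc -formatZK

# 5. Start the full HDFS stack (NameNodes, DataNodes, JournalNodes, ZKFCs)
$HADOOP_HOME/sbin/start-dfs.sh

# 6. Verify that exactly one NameNode is active
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2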

yarn-site.xml

<?xml version="1.0"?>
<configuration>
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
        <!-- Enable log aggregation -->
        <property>
                <name>yarn.log-aggregation-enable</name>
                <value>true</value>
        </property>
        <!-- URL of the job history server that serves aggregated logs -->
        <property>
                <name>yarn.log.server.url</name>
                <value>http://hadoop-zxkf-1:19888/jobhistory/logs/</value>
        </property>

        <!-- Keep aggregated logs for one day (86400 seconds) -->
        <property>
                <name>yarn.log-aggregation.retain-seconds</name>
                <value>86400</value>
        </property>

        <!-- Enable ResourceManager HA -->
        <property>
                <name>yarn.resourcemanager.ha.enabled</name>
                <value>true</value>
        </property>

        <!-- Cluster ID and the two ResourceManagers -->
        <property>
                <name>yarn.resourcemanager.cluster-id</name>
                <value>cluster-yarn1</value>
        </property>

        <property>
                <name>yarn.resourcemanager.ha.rm-ids</name>
                <value>rm1,rm2</value>
        </property>

        <property>
                <name>yarn.resourcemanager.hostname.rm1</name>
                <value>hadoop-zxkf-2</value>
        </property>

        <property>
                <name>yarn.resourcemanager.hostname.rm2</name>
                <value>hadoop-zxkf-3</value>
        </property>

        <!-- ZooKeeper quorum address -->
        <property>
                <name>yarn.resourcemanager.zk-address</name>
                <value>hadoop-zxkf-1:2181,hadoop-zxkf-2:2181,hadoop-zxkf-3:2181</value>
        </property>

        <!-- Enable automatic recovery of ResourceManager state -->
        <property>
                <name>yarn.resourcemanager.recovery.enabled</name>
                <value>true</value>
        </property>

        <!-- Store ResourceManager state in the ZooKeeper cluster -->
        <property>
                <name>yarn.resourcemanager.store.class</name>
                <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
        </property>
</configuration>
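
In Hadoop 2.x, start-yarn.sh only starts the ResourceManager on the node where it is run, so the standby RM has to be started by hand (a sketch assuming the same $HADOOP_HOME as above):

# On rm1 (hadoop-zxkf-2): start this ResourceManager and all NodeManagers
$HADOOP_HOME/sbin/start-yarn.sh

# On rm2 (hadoop-zxkf-3): start the second ResourceManager manually
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager

# Verify that one RM is active and the other standby
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2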

 
