linux(ubuntu)myeclipse+eclipse+hadoop系统搭建(三)
本部分为附件部分,详述hadoop与hbase重要配置文件内容,不作为教程,而是文件内容损坏后的参考,或者强行参考。(倾情奉献by Juni)
本部分为附件部分,详述hadoop与hbase重要配置文件内容,不作为教程,而是文件内容损坏后的参考,或者强行参考。(倾情奉献by Juni)
/opt/hadoop-2.6.1/etc core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.1.148:9000</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/hadooptmp/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<property>
<name>hadoop.proxyuser.u0.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.u0.groups</name>
<value>*</value>
</property>
</configuration>
/opt/hadoop-2.6.1/etc/hadoop slaves
101.5.213.74 S1
101.5.208.68 s2
(注:slaves 文件每行一个从节点的主机名或 IP;上面 IP 与主机名疑似粘连,注意 S1/s2 大小写不一致,请与 /etc/hosts 核对。)
/opt/hadoop-2.6.1/etc/hadoop hadoop-env.sh
export JAVA_HOME=/opt/java/jdk1.7.0_67
/opt/hadoop-2.6.1/etc/hadoop yarn-site.xml
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>101.5.208.157:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>101.5.208.157:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>101.5.208.157:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>101.5.208.157:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>101.5.208.157:8088</value>
</property>
</configuration>
/opt/hadoop-2.6.1/etc/hadoop mapred-site.xml.template → mapred-site.xml
<configuration>
<property>
<name>mapreduce.jobtracker.address</name>
<value>192.168.1.148:9001</value>
</property>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobtracker.http.address</name>
<value>192.168.1.148:50030</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>192.168.1.148:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>192.168.1.148:19888</value>
</property>
</configuration>
/opt/hadoop-2.6.1/etc/hadoop hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>192.168.1.148:50090</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/home/hadoop/hadooptmp/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/home/hadoop/hadooptmp/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
</configuration>
yarn-env.sh
export JAVA_HOME=/opt/java/jdk1.7.0_67
_________
Create two folders in the Home directory:
hadooptmp and tmp
:/opt/hadoop-2.6.1$ sudo chmod -R 755 bin
:/opt/hadoop-2.6.1$ sudo gedit /etc/profile
# /etc/profile: system-wide .profile file for the Bourne shell (sh(1))
# and Bourne compatible shells (bash(1), ksh(1), ash(1), ...).

# Interactive shells only: pick a prompt (bash sets its own via bash.bashrc).
if [ "$PS1" ]; then
  if [ "$BASH" ] && [ "$BASH" != "/bin/sh" ]; then
    # The file bash.bashrc already sets the default PS1.
    # PS1='\h:\w\$ '
    if [ -f /etc/bash.bashrc ]; then
      . /etc/bash.bashrc
    fi
  else
    # Plain sh: root gets '#', everyone else '$'.
    if [ "$(id -u)" -eq 0 ]; then
      PS1='#'
    else
      PS1='$'
    fi
  fi
fi

# The default umask is now handled by pam_umask.
# See pam_umask(8) and /etc/login.defs.

# Source any drop-in profile snippets.
if [ -d /etc/profile.d ]; then
  for i in /etc/profile.d/*.sh; do
    if [ -r "$i" ]; then
      . "$i"
    fi
  done
  unset i
fi

# Java environment.
# NOTE(review): jdk1.7.0_80 here, but hadoop-env.sh/yarn-env.sh use
# jdk1.7.0_67 -- make sure every file points at the same JDK install.
# JAVA_HOME must be exported so Hadoop/HBase start scripts can see it.
export JAVA_HOME=/opt/java/jdk1.7.0_80
export JRE_HOME=/opt/java/jdk1.7.0_80/jre
export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH
export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH

# Hadoop environment.
export HADOOP_HOME=/opt/hadoop-2.6.1
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_YARN_HOME=$HADOOP_HOME
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
# Fixed typo: $HADOOP_HOOME -> $HADOOP_HOME (sbin was silently missing
# from PATH, so start-dfs.sh/start-yarn.sh could not be found).
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"

# HBase environment.
export HBASE_HOME=/opt/hbase
export PATH=$HBASE_HOME/bin:$PATH
HBASE Configurations
Copy the hbase folder into the /opt folder.
/opt/hbase/conf hbase-env.sh
export JAVA_HOME=/opt/java/jdk1.7.0_80
hbase-site.xml
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://101.5.208.157:9000/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.master</name>
<value>101.5.208.157:60000</value>
</property>
<property>
<name>hbase.master.port</name>
<value>60000</value>
<description>The port master should bind to.</description>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>101.5.208.157,101.5.208.68,101.5.213.74</value>
</property>
<property>
<name>hbase.zookeeper.znode.parent</name>
<value>/hbase-unsecure</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/home/hadoop/hadooptmp/zk/data</value>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
</property>
</configuration>
/opt/hbase/conf regionservers
hbase-env.sh
# The java implementation to use. Java 1.7+ required.
# NOTE(review): jdk1.7.0_67 here, but /etc/profile exports jdk1.7.0_80 --
# verify which JDK is actually installed and use it consistently.
export JAVA_HOME=/opt/java/jdk1.7.0_67

# Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+.
# (Original text was missing the space between -XX:PermSize=128m and
# -XX:MaxPermSize=128m, and split the second string across two lines.)
export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"

# The directory where pid files are stored. /tmp by default.
export HBASE_PID_DIR=/home/hadoop/pids

# Tell HBase whether it should manage its own instance of ZooKeeper or not.
export HBASE_MANAGES_ZK=true
After that, copy this hbase folder to the slaves, and then run these commands on the master after making some settings on the slaves.
:/opt/hbase/bin$ ./start-hbase.sh
开放原子开发者工作坊旨在鼓励更多人参与开源活动,与志同道合的开发者们相互交流开发经验、分享开发心得、获取前沿技术趋势。工作坊有多种形式的开发者活动,如meetup、训练营等,主打技术交流,干货满满,真诚地邀请各位开发者共同参与!
更多推荐
所有评论(0)