forked from GA239/hadoop-ha-docker
-
Notifications
You must be signed in to change notification settings - Fork 0
/
bootstrap.sh
executable file
·110 lines (87 loc) · 2.94 KB
/
bootstrap.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
#!/bin/bash
checkArg () {
  # Parse optional leading flags, in any order:
  #   -d  keep the container running after startup (sets global keeprunning=true)
  #   -b  drop into an interactive bash at the end (sets global bash=true)
  # Sets the global variable 'shift' to the number of arguments consumed,
  # so the caller can do: shift $shift
  local s=0
  while [[ $1 == "-d" || $1 == "-b" ]]; do
    if [[ $1 == "-d" ]]; then
      keeprunning=true
    else
      bash=true
    fi
    shift 1
    s=$((s + 1))
  done
  shift=$s
}
# Default install location; hadoop-env.sh exports JAVA_HOME and friends and
# must be *sourced* — executing it in a child shell would discard its exports.
: "${HADOOP_PREFIX:=/usr/local/hadoop}"
. "$HADOOP_PREFIX/etc/hadoop/hadoop-env.sh"

# Cluster name used to fill in the *-site.xml templates below.
if [ -z "$CLUSTER_NAME" ]; then
  CLUSTER_NAME="cluster"
  export CLUSTER_NAME
fi

# Consume optional -d / -b flags; checkArg sets $shift to the count consumed.
checkArg "$@"
shift "$shift"

# First remaining argument selects the daemon to start; default to a shell.
server="none"
if [[ -z "$1" ]]; then
  # no daemon requested: fall through to an interactive bash
  bash=true
else
  server=$1
  shift 1
fi

# All four addresses are mandatory for the HA configuration; fail loudly
# (non-zero exit, message on stderr) instead of the old silent 'exit' 0.
if [ -z "$NNODE1_IP" ] || [ -z "$NNODE2_IP" ] || [ -z "$ZK_IPS" ] || [ -z "$JN_IPS" ]; then
  echo "NNODE1_IP, NNODE2_IP, JN_IPS and ZK_IPS needs to be set as environment addresses to be able to run." >&2
  exit 1
fi
# hdfs-site.xml wants the journalnode list ';'-separated; JN_IPS uses ','.
JNODES=${JN_IPS//,/;}

# Render hdfs-site.xml from its template under $HADOOP_PREFIX (the old code
# hard-coded /usr/local/hadoop here, diverging from the prefix set above).
# NOTE(review): values containing '/' or '&' would break these sed
# replacements — assumed not to occur in hostnames/IP lists.
sed "s/CLUSTER_NAME/$CLUSTER_NAME/" "$HADOOP_PREFIX/etc/hadoop/hdfs-site.xml.template" \
  | sed "s/NNODE1_IP/$NNODE1_IP/" \
  | sed "s/NNODE2_IP/$NNODE2_IP/" \
  | sed "s/ZKNODES/$ZK_IPS/" \
  | sed "s/JNODES/$JNODES/" \
  > "$HADOOP_PREFIX/etc/hadoop/hdfs-site.xml"

# Local storage for namenode metadata, datanode blocks and journalnode edits.
mkdir -p /mnt/hadoop/dfs/name /mnt/hadoop/dfs/data /mnt/hadoop/journal/data

# Render core-site.xml from its template.
sed "s/CLUSTER_NAME/$CLUSTER_NAME/" "$HADOOP_PREFIX/etc/hadoop/core-site.xml.template" \
  > "$HADOOP_PREFIX/etc/hadoop/core-site.xml"

echo "SERVER=$server CLUSTER_NAME=$CLUSTER_NAME NNODE1_IP=$NNODE1_IP NNODE2_IP=$NNODE2_IP JNODES=$JNODES ZK_IPS=$ZK_IPS"
# One-shot "format" mode: initialise namenode metadata and the HA znode in
# ZooKeeper, then exit. Run once (on nn1) before the first cluster start.
if [[ "$server" == "format" ]]; then
  # 'hadoop namenode' is the deprecated alias; use the hdfs entry point,
  # consistent with the zkfc invocation on the next line.
  "$HADOOP_PREFIX/bin/hdfs" namenode -format
  "$HADOOP_PREFIX/bin/hdfs" zkfc -formatZK
  exit
fi

if [[ "$server" == "standby" ]]; then
  # instead of bootstrapStandby command, we just copy over the data from the mounted directory, which comes from nn1 and exit.
  # Be sure to mount the nn1 volume
  cp -r /mnt/shared/nn1/dfs/name/* /mnt/hadoop/dfs/name/
  exit
fi

# Normal mode: start the requested daemon, forwarding any leftover args.
if [[ "$server" != "none" ]]; then
  echo "$HADOOP_PREFIX/sbin/hadoop-daemon.sh" start "$server" "$@"
  "$HADOOP_PREFIX/sbin/hadoop-daemon.sh" start "$server" "$@"
  # A namenode also needs its local ZKFC for automatic failover.
  if [[ "$server" == "namenode" ]]; then
    "$HADOOP_PREFIX/sbin/hadoop-daemon.sh" start zkfc
  fi
fi
# press CTRL-C to exit the container
trap 'exit' INT

# Watchdog (enabled by -d): poll every 3s and exit non-zero as soon as the
# daemon we launched has died, so the container stops with it.
if [[ $keeprunning = true ]]; then
  while true; do
    # While hadoop-daemon.sh itself is still running, a controlled stop may
    # be in progress — don't treat a missing daemon as a failure then.
    if pgrep -f hadoop-daemon.sh > /dev/null; then
      echo Hadoop daemon running
    else
      case "$server" in
        namenode)
          if ! pgrep -f NameNode > /dev/null; then
            echo NameNode not running
            $HADOOP_PREFIX/sbin/hadoop-daemon.sh stop zkfc
            exit 1
          fi
          if ! pgrep -f DFSZKFailoverController > /dev/null; then
            echo ZKFC not running
            $HADOOP_PREFIX/sbin/hadoop-daemon.sh stop namenode
            exit 1
          fi
          ;;
        datanode)
          if ! pgrep -f DataNode > /dev/null; then
            echo DataNode not running
            exit 1
          fi
          ;;
        journalnode)
          if ! pgrep -f JournalNode > /dev/null; then
            echo JournalNode not running
            exit 1
          fi
          ;;
      esac
    fi
    sleep 3
  done
fi

# -b (or no daemon argument): hand control to an interactive shell.
if [[ $bash = true ]]; then
  /bin/bash
fi