#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
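#
# Start the dikeHDFS Hadoop container.
#
# Usage (a summary of the behavior implemented below):
#   ./start_server.sh             # daemon mode: start Hadoop in the background
#   ./start_server.sh <command>   # interactive mode: run <command> in the
#                                 # container, e.g. ./start_server.sh bash
#
# Assumes config.sh provides HADOOP_VERSION, the hadoop-<version>-ndp-<user>
# image has been built, and the dike-net Docker network already exists
# (e.g. created with `docker network create dike-net`).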
set -e # exit on error
pushd "$(dirname "$0")" # cd to the repository root
source ./config.sh # expected to define HADOOP_VERSION, used below
ROOT_DIR=$(pwd)
echo "ROOT_DIR ${ROOT_DIR}"
# Run the container as the invoking user (the real user when under sudo)
USER_NAME=${SUDO_USER:-$USER}
USER_ID=$(id -u "${USER_NAME}")
# Set the home directory in the Docker container.
DOCKER_HOME_DIR=${DOCKER_HOME_DIR:-/home/${USER_NAME}}
# Hadoop install location inside the container
HADOOP_HOME=/opt/hadoop/hadoop-${HADOOP_VERSION}
# Make sure the server directory exists
mkdir -p "${ROOT_DIR}/server"
# Create NameNode and DataNode mount points
mkdir -p "${ROOT_DIR}/volume/namenode"
mkdir -p "${ROOT_DIR}/volume/datanode0"
mkdir -p "${ROOT_DIR}/volume/logs"
mkdir -p "${ROOT_DIR}/volume/status"
rm -f "${ROOT_DIR}"/volume/status/*
# Can be used to transfer data to HDFS
mkdir -p "${ROOT_DIR}/data"
CMD="bin/start-hadoop.sh"
RUNNING_MODE="daemon"
DOCKER_IT=""
# Any arguments replace the default command and switch to interactive mode
if [ "$#" -ge 1 ] ; then
    CMD="$*"
    RUNNING_MODE="interactive"
fi
if [ "$RUNNING_MODE" = "interactive" ]; then
    DOCKER_IT="-i -t"
fi
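# Build the docker run command as a string so it can be echoed for debugging
# before being eval'd. Host directories are bind-mounted into the container:
# ./data for transferring files into HDFS, the NameNode/DataNode volumes,
# the Hadoop logs and site configs, the dikeHDFS source tree, and ./server.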
DOCKER_RUN="docker run --rm=true ${DOCKER_IT} \
    -v ${ROOT_DIR}/data:/data \
    -v ${ROOT_DIR}/dikeHDFS:${DOCKER_HOME_DIR}/dikeHDFS \
    -v ${ROOT_DIR}/volume/namenode:/opt/volume/namenode \
    -v ${ROOT_DIR}/volume/datanode0:/opt/volume/datanode \
    -v ${ROOT_DIR}/volume/status:/opt/volume/status \
    -v ${ROOT_DIR}/volume/logs:${HADOOP_HOME}/logs \
    -v ${ROOT_DIR}/etc/hadoop/core-site.xml:${HADOOP_HOME}/etc/hadoop/core-site.xml \
    -v ${ROOT_DIR}/etc/hadoop/hdfs-site.xml:${HADOOP_HOME}/etc/hadoop/hdfs-site.xml \
    -v ${ROOT_DIR}/scripts/start-hadoop.sh:${HADOOP_HOME}/bin/start-hadoop.sh \
    -v ${ROOT_DIR}/server:/server \
    -w ${HADOOP_HOME} \
    -e HADOOP_HOME=${HADOOP_HOME} \
    -e HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop \
    -e RUNNING_MODE=${RUNNING_MODE} \
    -u ${USER_ID} \
    --network dike-net \
    --name dikehdfs --hostname dikehdfs \
    hadoop-${HADOOP_VERSION}-ndp-${USER_NAME} ${CMD}"
#echo "$DOCKER_RUN"
if [ "$RUNNING_MODE" = "interactive" ]; then
eval "${DOCKER_RUN}"
else
eval "${DOCKER_RUN}" &
    while [ ! -f "${ROOT_DIR}/volume/status/HADOOP_STATE" ]; do
        sleep 1
    done
    cat "${ROOT_DIR}/volume/status/HADOOP_STATE"
    #docker exec dikehdfs /server/dikeHDFS &
fi
popd