From 796905d36c40d36ae13c459e17ef09c73f9b6c39 Mon Sep 17 00:00:00 2001 From: Lindsay Eliasen Date: Thu, 22 Aug 2024 15:00:02 -0500 Subject: [PATCH 1/5] CASMPET-7209 add instructions to mount admin-tools bucket if desired (cherry picked from commit 5e4184d360e1769075cae2bcb25df71061c284d0) --- .../Troubleshoot_S3FS_Mounts.md | 74 ++++++++++++++++++- 1 file changed, 71 insertions(+), 3 deletions(-) diff --git a/operations/utility_storage/Troubleshoot_S3FS_Mounts.md b/operations/utility_storage/Troubleshoot_S3FS_Mounts.md index 768eca96c820..d7c05cdd3075 100644 --- a/operations/utility_storage/Troubleshoot_S3FS_Mounts.md +++ b/operations/utility_storage/Troubleshoot_S3FS_Mounts.md @@ -8,10 +8,12 @@ Master nodes should host the following three mount points: ```bash /var/opt/cray/config-data (config-data S3 bucket) -/var/lib/admin-tools (admin-tools S3 bucket) /var/opt/cray/sdu/collection-mount (sds S3 bucket) ``` +> NOTE: the mount `/var/lib/admin-tools (admin-tools S3 bucket)` is no longer mounted in CSM 1.4+. +> If it is desired to have this bucket mounted, please see [Mount 'admin-tools' S3 bucket](#mount-admin-tools-s3-bucket). + ## Worker Node Mount Points Worker nodes should host the following mount point: @@ -31,7 +33,6 @@ Run the following command on master nodes to ensure the mounts are present: ```bash ncn-m: # mount | grep 's3fs on' s3fs on /var/opt/cray/config-data type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0) -s3fs on /var/lib/admin-tools type fuse.s3fs (rw,relatime,user_id=0,group_id=0,allow_other) s3fs on /var/opt/cray/sdu/collection-mount type fuse.s3fs (rw,relatime,user_id=0,group_id=0,allow_other) ``` @@ -57,7 +58,6 @@ Ensure the `/etc/fstab` contains the following content: ```bash ncn-m: # grep fuse.s3fs /etc/fstab sds /var/opt/cray/sdu/collection-mount fuse.s3fs _netdev,allow_other,passwd_file=/root/.sds.s3fs,url=http://rgw-vip.nmn,use_path_request_style,use_cache=/var/lib/s3fs_cache,check_cache_dir_exist,use_xattr,uid=2370,gid=2370,umask=0007,allow_other 0 0 -admin-tools /var/lib/admin-tools fuse.s3fs _netdev,allow_other,passwd_file=/root/.admin-tools.s3fs,url=http://rgw-vip.nmn,use_path_request_style,use_cache=/var/lib/s3fs_cache,check_cache_dir_exist,use_xattr 0 0 config-data /var/opt/cray/config-data fuse.s3fs _netdev,allow_other,passwd_file=/root/.config-data.s3fs,url=http://rgw-vip.nmn,use_path_request_style,use_xattr 0 0 ``` @@ -84,3 +84,71 @@ ncn-mw: # mount -a If the above command fails, then the error likely indicates that there is an issue communicating with Ceph's `Radosgw` endpoint (`rgw-vip`). In this case the [Troubleshoot an Unresponsive S3 Endpoint](Troubleshoot_an_Unresponsive_S3_Endpoint.md) procedure should be followed to ensure the endpoint is healthy. + +## Mount 'admin-tools' S3 bucket + +In CSM 1.2 and CSM 1.3, `/var/lib/admin-tools (admin-tools S3 bucket)` was a mounted S3 bucket. Starting in CSM 1.4, the `admin-tools` S3 bucket is no longer mounted. +It is not necessary for this bucket to be mounted for system operations. +However, if it is desired to have the `admin-tools` S3 bucket mounted, please follow the steps below. + + 1. (`ncn-m#`) Mount the `admin-tools` S3 bucket. Run this on each master node where the bucket should be mounted. 
+ + ```bash + function mount_admin_tools() { + s3_bucket="admin-tools" + s3fs_mount_dir="/var/lib/admin-tools" + s3_user="admin-tools" + + s3fs_cache_dir=/var/lib/s3fs_cache + if [ -d ${s3fs_cache_dir} ]; then + s3fs_opts="use_path_request_style,use_cache=${s3fs_cache_dir},check_cache_dir_exist,use_xattr" + else + s3fs_opts="use_path_request_style,use_xattr" + fi + + echo "Configuring for ${s3_bucket} S3 bucket at ${s3fs_mount_dir} for ${s3_user} S3 user" + + mkdir -p ${s3fs_mount_dir} + + pwd_file=/root/.${s3_user}.s3fs + access_key=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.access_key' | base64 -d) + secret_key=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.secret_key' | base64 -d) + s3_endpoint=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.http_s3_endpoint' | base64 -d) + + echo "${access_key}:${secret_key}" > ${pwd_file} + chmod 600 ${pwd_file} + + echo "Mounting bucket: ${s3_bucket} at ${s3fs_mount_dir}" + s3fs ${s3_bucket} ${s3fs_mount_dir} -o passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts} + + echo "Adding fstab entry for ${s3_bucket} S3 bucket at ${s3fs_mount_dir} for ${s3_user} S3 user" + echo "${s3_bucket} ${s3fs_mount_dir} fuse.s3fs _netdev,allow_other,passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts} 0 0" >> /etc/fstab + + echo "Set cache pruning for admin tools to 5G of the 200G volume (every 2nd hour)" + echo "0 */2 * * * root /usr/bin/prune-s3fs-cache.sh admin-tools ${s3fs_cache_dir} 5368709120 -silent" > /etc/cron.d/prune-s3fs-admin-tools-cache + } + mount_admin_tools + ``` + + 1. (`ncn-m#`) Check that the `admin-tools` bucket is mounted correctly. + + 1. Check `/etc/fstab` contains the following content: + + ```bash + ncn-m: # grep fuse.s3fs /etc/fstab + sds /var/opt/cray/sdu/collection-mount fuse.s3fs _netdev,allow_other,passwd_file=/root/.sds.s3fs,url=http://rgw-vip.nmn,use_path_request_style,use_cache=/var/lib/s3fs_cache,check_cache_dir_exist,use_xattr,uid=2370,gid=2370,umask=0007,allow_other 0 0 + admin-tools /var/lib/admin-tools fuse.s3fs _netdev,allow_other,passwd_file=/root/.admin-tools.s3fs,url=http://rgw-vip.nmn,use_path_request_style,use_cache=/var/lib/s3fs_cache,check_cache_dir_exist,use_xattr 0 0 + config-data /var/opt/cray/config-data fuse.s3fs _netdev,allow_other,passwd_file=/root/.config-data.s3fs,url=http://rgw-vip.nmn,use_path_request_style,use_xattr 0 0 + ``` + + 1. Check that `/var/lib/admin-tools` is mounted. + + ```bash + ncn-m: # mount | grep 's3fs on' + s3fs on /var/opt/cray/config-data type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0) + s3fs on /var/lib/admin-tools type fuse.s3fs (rw,relatime,user_id=0,group_id=0,allow_other) + s3fs on /var/opt/cray/sdu/collection-mount type fuse.s3fs (rw,relatime,user_id=0,group_id=0,allow_other) + ``` + +> NOTE: This mount will not be recreated after a node upgrade or rebuild. +> This procedure will need to be redone in the case of a node upgrade or rebuild. 
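
If the bucket later needs to be unmounted again, the artifacts created by the steps above (the s3fs mount itself, the `/etc/fstab` entry, the cache-pruning cron file, and the password file) can be removed by hand. A minimal cleanup sketch, assuming the default paths used in this procedure:

```bash
# Cleanup sketch -- paths assume the defaults used in the mount procedure above.
umount /var/lib/admin-tools                                           # detach the s3fs mount
sed -i '\#^admin-tools /var/lib/admin-tools fuse.s3fs #d' /etc/fstab  # drop the fstab entry added above
rm -f /etc/cron.d/prune-s3fs-admin-tools-cache                        # remove the cache-pruning cron job
rm -f /root/.admin-tools.s3fs                                         # remove the S3 credentials file
```
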
From 9f25a3bf4529775642aa54524a8143f0da18ae0b Mon Sep 17 00:00:00 2001 From: Lindsay Eliasen Date: Fri, 23 Aug 2024 15:31:06 -0500 Subject: [PATCH 2/5] CASMPET-7209 add script to mount admin-tools bucket (cherry picked from commit 7b274a4fca1c794adfe9f850a1207ad7dbf3b67f) --- .../Troubleshoot_S3FS_Mounts.md | 67 ++------------- scripts/mount-admin-tools-bucket.sh | 86 +++++++++++++++++++ 2 files changed, 93 insertions(+), 60 deletions(-) create mode 100755 scripts/mount-admin-tools-bucket.sh diff --git a/operations/utility_storage/Troubleshoot_S3FS_Mounts.md b/operations/utility_storage/Troubleshoot_S3FS_Mounts.md index d7c05cdd3075..b54cf83ee426 100644 --- a/operations/utility_storage/Troubleshoot_S3FS_Mounts.md +++ b/operations/utility_storage/Troubleshoot_S3FS_Mounts.md @@ -89,66 +89,13 @@ In this case the [Troubleshoot an Unresponsive S3 Endpoint](Troubleshoot_an_Unre In CSM 1.2 and CSM 1.3, `/var/lib/admin-tools (admin-tools S3 bucket)` was a mounted S3 bucket. Starting in CSM 1.4, the `admin-tools` S3 bucket is no longer mounted. It is not necessary for this bucket to be mounted for system operations. -However, if it is desired to have the `admin-tools` S3 bucket mounted, please follow the steps below. - - 1. (`ncn-m#`) Mount the `admin-tools` S3 bucket. Run this on each master node where the bucket should be mounted. - - ```bash - function mount_admin_tools() { - s3_bucket="admin-tools" - s3fs_mount_dir="/var/lib/admin-tools" - s3_user="admin-tools" - - s3fs_cache_dir=/var/lib/s3fs_cache - if [ -d ${s3fs_cache_dir} ]; then - s3fs_opts="use_path_request_style,use_cache=${s3fs_cache_dir},check_cache_dir_exist,use_xattr" - else - s3fs_opts="use_path_request_style,use_xattr" - fi - - echo "Configuring for ${s3_bucket} S3 bucket at ${s3fs_mount_dir} for ${s3_user} S3 user" - - mkdir -p ${s3fs_mount_dir} - - pwd_file=/root/.${s3_user}.s3fs - access_key=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.access_key' | base64 -d) - secret_key=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.secret_key' | base64 -d) - s3_endpoint=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.http_s3_endpoint' | base64 -d) - - echo "${access_key}:${secret_key}" > ${pwd_file} - chmod 600 ${pwd_file} - - echo "Mounting bucket: ${s3_bucket} at ${s3fs_mount_dir}" - s3fs ${s3_bucket} ${s3fs_mount_dir} -o passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts} - - echo "Adding fstab entry for ${s3_bucket} S3 bucket at ${s3fs_mount_dir} for ${s3_user} S3 user" - echo "${s3_bucket} ${s3fs_mount_dir} fuse.s3fs _netdev,allow_other,passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts} 0 0" >> /etc/fstab - - echo "Set cache pruning for admin tools to 5G of the 200G volume (every 2nd hour)" - echo "0 */2 * * * root /usr/bin/prune-s3fs-cache.sh admin-tools ${s3fs_cache_dir} 5368709120 -silent" > /etc/cron.d/prune-s3fs-admin-tools-cache - } - mount_admin_tools - ``` - - 1. (`ncn-m#`) Check that the `admin-tools` bucket is mounted correctly. - - 1. 
Check `/etc/fstab` contains the following content: - - ```bash - ncn-m: # grep fuse.s3fs /etc/fstab - sds /var/opt/cray/sdu/collection-mount fuse.s3fs _netdev,allow_other,passwd_file=/root/.sds.s3fs,url=http://rgw-vip.nmn,use_path_request_style,use_cache=/var/lib/s3fs_cache,check_cache_dir_exist,use_xattr,uid=2370,gid=2370,umask=0007,allow_other 0 0 - admin-tools /var/lib/admin-tools fuse.s3fs _netdev,allow_other,passwd_file=/root/.admin-tools.s3fs,url=http://rgw-vip.nmn,use_path_request_style,use_cache=/var/lib/s3fs_cache,check_cache_dir_exist,use_xattr 0 0 - config-data /var/opt/cray/config-data fuse.s3fs _netdev,allow_other,passwd_file=/root/.config-data.s3fs,url=http://rgw-vip.nmn,use_path_request_style,use_xattr 0 0 - ``` - - 1. Check that `/var/lib/admin-tools` is mounted. - - ```bash - ncn-m: # mount | grep 's3fs on' - s3fs on /var/opt/cray/config-data type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0) - s3fs on /var/lib/admin-tools type fuse.s3fs (rw,relatime,user_id=0,group_id=0,allow_other) - s3fs on /var/opt/cray/sdu/collection-mount type fuse.s3fs (rw,relatime,user_id=0,group_id=0,allow_other) - ``` +However, if it is desired to have the `admin-tools` S3 bucket mounted, please run the following script on all master nodes where the `admin-tools` bucket should be mounted. + +(`ncn-m#`) Mount the `admin-tools` S3 bucket. + + ```bash + /usr/share/doc/csm/scripts/mount-admin-tools-bucket.sh + ``` > NOTE: This mount will not be recreated after a node upgrade or rebuild. > This procedure will need to be redone in the case of a node upgrade or rebuild. diff --git a/scripts/mount-admin-tools-bucket.sh b/scripts/mount-admin-tools-bucket.sh new file mode 100755 index 000000000000..5ad96daa46d1 --- /dev/null +++ b/scripts/mount-admin-tools-bucket.sh @@ -0,0 +1,86 @@ +#!/bin/bash +# MIT License +# +# (C) Copyright [2024] Hewlett Packard Enterprise Development LP +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+ +# Mount admin-tools S3 bucket +s3_bucket="admin-tools" +s3fs_mount_dir="/var/lib/admin-tools" +s3_user="admin-tools" + +s3fs_cache_dir=/var/lib/s3fs_cache +if [ -d ${s3fs_cache_dir} ]; then + s3fs_opts="use_path_request_style,use_cache=${s3fs_cache_dir},check_cache_dir_exist,use_xattr" +else + s3fs_opts="use_path_request_style,use_xattr" +fi + +echo "Configuring for ${s3_bucket} S3 bucket at ${s3fs_mount_dir} for ${s3_user} S3 user" + +mkdir -p ${s3fs_mount_dir} + +pwd_file=/root/.${s3_user}.s3fs +access_key=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.access_key' | base64 -d) +secret_key=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.secret_key' | base64 -d) +s3_endpoint=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.http_s3_endpoint' | base64 -d) + +echo "${access_key}:${secret_key}" > ${pwd_file} +chmod 600 ${pwd_file} + +echo "Mounting bucket: ${s3_bucket} at ${s3fs_mount_dir}" +s3fs ${s3_bucket} ${s3fs_mount_dir} -o passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts} + +echo "Adding fstab entry for ${s3_bucket} S3 bucket at ${s3fs_mount_dir} for ${s3_user} S3 user" +if [[ -z $(cat /etc/fstab | grep "admin-tools" | grep "fuse.s3fs") ]]; then + echo "${s3_bucket} ${s3fs_mount_dir} fuse.s3fs _netdev,allow_other,passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts} 0 0" >> /etc/fstab +else + echo "An entry in /etc/fstab already exists for ${s3_bucket} ${s3fs_mount_dir} fuse.s3fs" +fi + +echo "Set cache pruning for admin tools to 5G of the 200G volume (every 2nd hour)" +echo "0 */2 * * * root /usr/bin/prune-s3fs-cache.sh admin-tools ${s3fs_cache_dir} 5368709120 -silent" > /etc/cron.d/prune-s3fs-admin-tools-cache + +echo -e "Done mounting admin-tools S3 bucket\n" + +# Validate admin-tools S3 bucket has been mounted + +echo "/etc/fstab has the following content:" +grep fuse.s3fs /etc/fstab +exit_code=0 +if [[ -n $(cat /etc/fstab | grep "admin-tools" | grep "fuse.s3fs") ]]; then + echo -e "admin-tools was successfully added to /etc/fstab\n" +else + echo -e "Error: admin-tools fuse.s3fs mount was not added to the /etc/fstab file\n" + exit_code=1 +fi +echo "The following s3fs mounts exist:" +mount | grep 's3fs on' +if [[ -n $(mount | grep 's3fs on /var/lib/admin-tools') ]]; then + echo -e "/var/lib/admin-tools is a s3fs mount\n" +else + echo -e "Error: /var/lib/admin-tools is not a s3fs mount.\n" + exit_code=1 +fi + +if [[ $exit_code == 0 ]]; then + echo "Successfully mounted admin-tools bucket" +fi +exit $exit_code From 1a4967e37638b452e642081ed38f76de9dd0bb98 Mon Sep 17 00:00:00 2001 From: Lindsay Eliasen <87664908+leliasen-hpe@users.noreply.github.com> Date: Fri, 30 Aug 2024 10:04:16 -0500 Subject: [PATCH 3/5] CASMPET-7209 Apply script improvement suggestions from code review Co-authored-by: Russell Bunch Signed-off-by: Lindsay Eliasen <87664908+leliasen-hpe@users.noreply.github.com> (cherry picked from commit 35b51cc5e40601a1b3b1664c485d5f9ed9a0d96d) --- scripts/mount-admin-tools-bucket.sh | 57 +++++++++++++++++++---------- 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/scripts/mount-admin-tools-bucket.sh b/scripts/mount-admin-tools-bucket.sh index 5ad96daa46d1..e8f8ab7b4a72 100755 --- a/scripts/mount-admin-tools-bucket.sh +++ b/scripts/mount-admin-tools-bucket.sh @@ -20,13 +20,12 @@ # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. 
- +set -euo pipefail # Mount admin-tools S3 bucket -s3_bucket="admin-tools" -s3fs_mount_dir="/var/lib/admin-tools" -s3_user="admin-tools" - -s3fs_cache_dir=/var/lib/s3fs_cache +s3_bucket="${S3_BUCKET:-admin-tools}" +s3fs_mount_dir="${S3FS_MOUNT_DIR:-/var/lib/admin-tools}" +s3_user="${S3_USER:-admin-tools}" +s3fs_cache_dir="${S3FS_CACHE_DIR:-/var/lib/s3fs_cache}" if [ -d ${s3fs_cache_dir} ]; then s3fs_opts="use_path_request_style,use_cache=${s3fs_cache_dir},check_cache_dir_exist,use_xattr" else @@ -35,12 +34,28 @@ fi echo "Configuring for ${s3_bucket} S3 bucket at ${s3fs_mount_dir} for ${s3_user} S3 user" -mkdir -p ${s3fs_mount_dir} +if [ ! -d "${s3fs_mount_dir}" ]; then + mkdir -pv "${s3fs_mount_dir}" +fi -pwd_file=/root/.${s3_user}.s3fs -access_key=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.access_key' | base64 -d) -secret_key=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.secret_key' | base64 -d) -s3_endpoint=$(kubectl get secret ${s3_user}-s3-credentials -o json | jq -r '.data.http_s3_endpoint' | base64 -d) +pwd_file="/root/.${s3_user}.s3fs" +secret_name="${s3_user}-s3-credentials" +s3_user_credentials="$( + if ! kubectl get secret "$secret_name" -o json 2>/dev/null | jq -r '.data' 2>/dev/null; then + echo >&2 "Failed to obtain credential data for user: [$s3_user]" + fi +)" +if [ -z "$s3_user_credentials" ]; then + echo "Exiting." + exit 1 +fi +access_key="$(jq -n -r --argjson s3_user_credentials "$s3_user_credentials" '$s3_user_credentials.access_key' | base64 -d)" +secret_key="$(jq -n -r --argjson s3_user_credentials "$s3_user_credentials" '$s3_user_credentials.secret_key' | base64 -d)" +s3_endpoint="$(jq -n -r --argjson s3_user_credentials "$s3_user_credentials" '$s3_user_credentials.http_s3_endpoint' | base64 -d)" +if [ "$access_key" = 'null' ] || [ "$secret_key" = 'null' ] || [ "$s3_endpoint" = 'null' ]; then + echo >&2 "Failed to find access_key, secret_key, or http_s3_endpoint for [$s3_user]" + exit 1 +fi echo "${access_key}:${secret_key}" > ${pwd_file} chmod 600 ${pwd_file} @@ -49,23 +64,23 @@ echo "Mounting bucket: ${s3_bucket} at ${s3fs_mount_dir}" s3fs ${s3_bucket} ${s3fs_mount_dir} -o passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts} echo "Adding fstab entry for ${s3_bucket} S3 bucket at ${s3fs_mount_dir} for ${s3_user} S3 user" -if [[ -z $(cat /etc/fstab | grep "admin-tools" | grep "fuse.s3fs") ]]; then +if grep "${s3_bucket}" /etc/fstab | grep -q "fuse.s3fs"; then echo "${s3_bucket} ${s3fs_mount_dir} fuse.s3fs _netdev,allow_other,passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts} 0 0" >> /etc/fstab else echo "An entry in /etc/fstab already exists for ${s3_bucket} ${s3fs_mount_dir} fuse.s3fs" fi echo "Set cache pruning for admin tools to 5G of the 200G volume (every 2nd hour)" -echo "0 */2 * * * root /usr/bin/prune-s3fs-cache.sh admin-tools ${s3fs_cache_dir} 5368709120 -silent" > /etc/cron.d/prune-s3fs-admin-tools-cache +echo "0 */2 * * * root /usr/bin/prune-s3fs-cache.sh ${s3_bucket} ${s3fs_cache_dir} 5368709120 -silent" > /etc/cron.d/prune-s3fs-${s3_bucket}-cache -echo -e "Done mounting admin-tools S3 bucket\n" +echo -e "Done mounting ${s3_bucket} S3 bucket\n" # Validate admin-tools S3 bucket has been mounted echo "/etc/fstab has the following content:" grep fuse.s3fs /etc/fstab exit_code=0 -if [[ -n $(cat /etc/fstab | grep "admin-tools" | grep "fuse.s3fs") ]]; then +if grep "$s3_bucket" /etc/fstab | grep -q "fuse.s3fs"; then echo -e "admin-tools was successfully added to /etc/fstab\n" else echo -e "Error: 
admin-tools fuse.s3fs mount was not added to the /etc/fstab file\n" @@ -73,14 +88,16 @@ else fi echo "The following s3fs mounts exist:" mount | grep 's3fs on' -if [[ -n $(mount | grep 's3fs on /var/lib/admin-tools') ]]; then - echo -e "/var/lib/admin-tools is a s3fs mount\n" +if mount | grep -q 's3fs on '"$s3fs_mount_dir"'; then + echo -e "$s3fs_mount_dir is an s3fs mount\n" else - echo -e "Error: /var/lib/admin-tools is not a s3fs mount.\n" + echo -e "Error: ${s3fs_mount_dir} is not an s3fs mount.\n" exit_code=1 fi -if [[ $exit_code == 0 ]]; then - echo "Successfully mounted admin-tools bucket" +if [ "$exit_code" -eq 0 ]; then + echo "Successfully mounted ${s3_bucket} bucket" +else + echo "Errors encountered. Please review script output." fi exit $exit_code From e841ba0665d3632bdea74b7914a5a33406db851b Mon Sep 17 00:00:00 2001 From: Russell Bunch Date: Fri, 30 Aug 2024 14:31:39 -0500 Subject: [PATCH 4/5] Apply suggestions from code review Signed-off-by: Russell Bunch (cherry picked from commit 9bd1655112ba96bba662c688b7d629cedf9a55c0) --- scripts/mount-admin-tools-bucket.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/mount-admin-tools-bucket.sh b/scripts/mount-admin-tools-bucket.sh index e8f8ab7b4a72..fcb4965e05fb 100755 --- a/scripts/mount-admin-tools-bucket.sh +++ b/scripts/mount-admin-tools-bucket.sh @@ -70,20 +70,20 @@ else echo "An entry in /etc/fstab already exists for ${s3_bucket} ${s3fs_mount_dir} fuse.s3fs" fi -echo "Set cache pruning for admin tools to 5G of the 200G volume (every 2nd hour)" +echo "Set cache pruning for s3_bucket to 5G of the 200G volume (every 2nd hour)" echo "0 */2 * * * root /usr/bin/prune-s3fs-cache.sh ${s3_bucket} ${s3fs_cache_dir} 5368709120 -silent" > /etc/cron.d/prune-s3fs-${s3_bucket}-cache echo -e "Done mounting ${s3_bucket} S3 bucket\n" -# Validate admin-tools S3 bucket has been mounted +# Validate S3 bucket has been mounted echo "/etc/fstab has the following content:" grep fuse.s3fs /etc/fstab exit_code=0 if grep "$s3_bucket" /etc/fstab | grep -q "fuse.s3fs"; then - echo -e "admin-tools was successfully added to /etc/fstab\n" + echo -e "${s3_bucket} was successfully added to /etc/fstab\n" else - echo -e "Error: admin-tools fuse.s3fs mount was not added to the /etc/fstab file\n" + echo -e "Error: ${s3_bucket} fuse.s3fs mount was not added to the /etc/fstab file\n" exit_code=1 fi echo "The following s3fs mounts exist:" From 02720fcfa3bb088db679975c987170ec367a4b30 Mon Sep 17 00:00:00 2001 From: Lindsay Eliasen Date: Wed, 4 Sep 2024 16:49:34 -0500 Subject: [PATCH 5/5] CASMPET-7209 fix syntax in mount-admin-tools-bucket.sh (cherry picked from commit 9bd91eb85d7cf244bfb5b68b432b429c445cac94) --- scripts/mount-admin-tools-bucket.sh | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/scripts/mount-admin-tools-bucket.sh b/scripts/mount-admin-tools-bucket.sh index fcb4965e05fb..ea64aba107c4 100755 --- a/scripts/mount-admin-tools-bucket.sh +++ b/scripts/mount-admin-tools-bucket.sh @@ -41,7 +41,7 @@ fi pwd_file="/root/.${s3_user}.s3fs" secret_name="${s3_user}-s3-credentials" s3_user_credentials="$( - if ! kubectl get secret "$secret_name" -o json 2>/dev/null | jq -r '.data' 2>/dev/null; then + if ! 
kubectl get secret "$secret_name" -o json 2> /dev/null | jq -r '.data' 2> /dev/null; then echo >&2 "Failed to obtain credential data for user: [$s3_user]" fi )" @@ -61,16 +61,19 @@ echo "${access_key}:${secret_key}" > ${pwd_file} chmod 600 ${pwd_file} echo "Mounting bucket: ${s3_bucket} at ${s3fs_mount_dir}" -s3fs ${s3_bucket} ${s3fs_mount_dir} -o passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts} +if ! s3fs ${s3_bucket} ${s3fs_mount_dir} -o passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts}; then + echo "Error: Check that ${s3_bucket} is not already mounted and ${s3fs_mount_dir} is empty." + exit 1 +fi echo "Adding fstab entry for ${s3_bucket} S3 bucket at ${s3fs_mount_dir} for ${s3_user} S3 user" -if grep "${s3_bucket}" /etc/fstab | grep -q "fuse.s3fs"; then +if ! grep "${s3_bucket}" /etc/fstab | grep -q "fuse.s3fs"; then echo "${s3_bucket} ${s3fs_mount_dir} fuse.s3fs _netdev,allow_other,passwd_file=${pwd_file},url=${s3_endpoint},${s3fs_opts} 0 0" >> /etc/fstab else echo "An entry in /etc/fstab already exists for ${s3_bucket} ${s3fs_mount_dir} fuse.s3fs" fi -echo "Set cache pruning for s3_bucket to 5G of the 200G volume (every 2nd hour)" +echo "Set cache pruning for ${s3_bucket} to 5G of the 200G volume (every 2nd hour)" echo "0 */2 * * * root /usr/bin/prune-s3fs-cache.sh ${s3_bucket} ${s3fs_cache_dir} 5368709120 -silent" > /etc/cron.d/prune-s3fs-${s3_bucket}-cache echo -e "Done mounting ${s3_bucket} S3 bucket\n" @@ -88,7 +91,7 @@ else fi echo "The following s3fs mounts exist:" mount | grep 's3fs on' -if mount | grep -q 's3fs on '"$s3fs_mount_dir"'; then +if mount | grep -q 's3fs on '"$s3fs_mount_dir"; then echo -e "$s3fs_mount_dir is an s3fs mount\n" else echo -e "Error: ${s3fs_mount_dir} is not an s3fs mount.\n"
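
After this series, the script reads its settings from environment variables with the defaults shown in the hunks above (`S3_BUCKET`, `S3FS_MOUNT_DIR`, `S3_USER`, and `S3FS_CACHE_DIR`). A brief usage sketch; the install path is the one referenced by the updated documentation, and the override values are illustrative only:

```bash
# Default invocation: mounts the admin-tools bucket at /var/lib/admin-tools.
/usr/share/doc/csm/scripts/mount-admin-tools-bucket.sh

# Overriding the defaults through the script's environment variables
# (illustrative values; the named bucket, S3 user, and credentials secret must already exist).
S3_BUCKET=admin-tools \
  S3FS_MOUNT_DIR=/mnt/admin-tools \
  S3FS_CACHE_DIR=/var/lib/s3fs_cache \
  /usr/share/doc/csm/scripts/mount-admin-tools-bucket.sh
```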