diff --git a/scripts/shared/backport.sh b/scripts/shared/backport.sh index 229f56e53..31461be7b 100755 --- a/scripts/shared/backport.sh +++ b/scripts/shared/backport.sh @@ -158,7 +158,7 @@ EOF gh pr create --title="Automated backport of ${numandtitle}" --body="${prtext}" --head "${GITHUB_USER}:${NEWBRANCH}" --base "${rel}" --repo="${MAIN_REPO_ORG}/${MAIN_REPO_NAME}" --label automated-backport for pull in "${PULLS[@]}"; do echo "+++ Adding 'backport-handled' label to" - gh pr edit $pull --repo="${MAIN_REPO_ORG}/${MAIN_REPO_NAME}" --add-label "backport-handled" + gh pr edit "$pull" --repo="${MAIN_REPO_ORG}/${MAIN_REPO_NAME}" --add-label "backport-handled" done } diff --git a/scripts/shared/build_image.sh b/scripts/shared/build_image.sh index 916dcfad4..90f1411cd 100755 --- a/scripts/shared/build_image.sh +++ b/scripts/shared/build_image.sh @@ -2,7 +2,7 @@ ## Process command line flags ## -source ${SCRIPTS_DIR}/lib/shflags +source "${SCRIPTS_DIR}/lib/shflags" DEFINE_string 'tag' "${DEV_VERSION}" "Tag to set for the local image" DEFINE_string 'repo' 'quay.io/submariner' "Quay.io repo to use for the image" DEFINE_string 'image' '' "Image name to build" 'i' @@ -33,7 +33,7 @@ if [[ "${platform}" =~ , && -z "${ocifile}" ]]; then exit 1 fi -source ${SCRIPTS_DIR}/lib/debug_functions +source "${SCRIPTS_DIR}/lib/debug_functions" set -e local_image=${repo}/${image}:${tag} @@ -43,8 +43,8 @@ cache_image=${repo}/${image}:${CUTTING_EDGE} cache_flag='' if [[ "$cache" = true ]]; then cache_flag="--cache-from ${cache_image}" - if [[ -z "$(docker image ls -q ${cache_image})" ]]; then - docker pull ${cache_image} || : + if [[ -z "$(docker image ls -q "${cache_image}")" ]]; then + docker pull "${cache_image}" || : fi # The shellcheck linting tool recommends piping to a while read loop, but that doesn't work for us # because the while loop ends up in a subshell @@ -57,7 +57,7 @@ if [[ "$cache" = true ]]; then } }' "${dockerfile}"); do cache_flag+=" --cache-from ${parent}" - docker pull 
${parent} || : + docker pull "${parent}" || : done fi @@ -78,14 +78,14 @@ buildargs_flag="--build-arg BUILDKIT_INLINE_CACHE=1 --build-arg BASE_BRANCH=${BA [[ -z "${buildargs}" ]] || buildargs_flag="${buildargs_flag} --build-arg ${buildargs}" if [[ "${platform}" != "${default_platform}" ]] && docker buildx version > /dev/null 2>&1; then docker buildx use buildx_builder || docker buildx create --name buildx_builder --use - docker buildx build ${output_flag} -t ${local_image} ${cache_flag} -f ${dockerfile} --iidfile "${hashfile}" --platform ${platform} ${buildargs_flag} . + docker buildx build ${output_flag:+"$output_flag"} -t "${local_image}" ${cache_flag} -f "${dockerfile}" --iidfile "${hashfile}" --platform "${platform}" ${buildargs_flag} . else # Fall back to plain BuildKit if [[ "${platform}" != "${default_platform}" ]]; then echo "WARNING: buildx isn't available, cross-arch builds won't work as expected" fi - DOCKER_BUILDKIT=1 docker build -t ${local_image} ${cache_flag} -f ${dockerfile} --iidfile "${hashfile}" ${buildargs_flag} . + DOCKER_BUILDKIT=1 docker build -t "${local_image}" ${cache_flag} -f "${dockerfile}" --iidfile "${hashfile}" ${buildargs_flag} . fi # We can only tag the image in non-OCI mode -[[ -n "${ocifile}" ]] || docker tag ${local_image} ${cache_image} +[[ -n "${ocifile}" ]] || docker tag "${local_image}" "${cache_image}" diff --git a/scripts/shared/cleanup.sh b/scripts/shared/cleanup.sh index d09c00b51..82d86815d 100755 --- a/scripts/shared/cleanup.sh +++ b/scripts/shared/cleanup.sh @@ -2,7 +2,7 @@ ## Process command line flags ## -source ${SCRIPTS_DIR}/lib/shflags +source "${SCRIPTS_DIR}/lib/shflags" DEFINE_string 'plugin' '' "Path to the plugin that has pre_cleanup and post_cleanup hook" FLAGS "$@" || exit $?
@@ -10,22 +10,22 @@ eval set -- "${FLAGS_ARGV}" set -em -source ${SCRIPTS_DIR}/lib/debug_functions -source ${SCRIPTS_DIR}/lib/utils +source "${SCRIPTS_DIR}/lib/debug_functions" +source "${SCRIPTS_DIR}/lib/utils" # Source plugin if the path is passed via plugin argument and the file exists -[[ -n "${FLAGS_plugin}" ]] && [[ -f "${FLAGS_plugin}" ]] && source ${FLAGS_plugin} +[[ -n "${FLAGS_plugin}" ]] && [[ -f "${FLAGS_plugin}" ]] && source "${FLAGS_plugin}" ### Functions ### function delete_cluster() { - kind delete cluster --name=${cluster}; + kind delete cluster --name="${cluster}" } function stop_local_registry { if registry_running; then echo "Stopping local KIND registry..." - docker stop $KIND_REGISTRY + docker stop "$KIND_REGISTRY" fi } @@ -37,7 +37,7 @@ clusters=($(kind get clusters)) run_if_defined pre_cleanup run_parallel "${clusters[*]}" delete_cluster -[[ -z "${DAPPER_OUTPUT}" ]] || rm -rf ${DAPPER_OUTPUT}/* +[[ -z "${DAPPER_OUTPUT}" ]] || rm -rf "${DAPPER_OUTPUT}"/* stop_local_registry docker system prune --volumes -f diff --git a/scripts/shared/clusters.sh b/scripts/shared/clusters.sh index 78ee1bf0a..7e2ad8834 100755 --- a/scripts/shared/clusters.sh +++ b/scripts/shared/clusters.sh @@ -14,7 +14,7 @@ kind_k8s_versions[1.23]=1.23.4@sha256:0e34f0d0fd448aa2f2819cfd74e99fe5793a6e4938 ## Process command line flags ## -source ${SCRIPTS_DIR}/lib/shflags +source "${SCRIPTS_DIR}/lib/shflags" DEFINE_string 'k8s_version' "${DEFAULT_K8S_VERSION}" 'Version of K8s to use' DEFINE_string 'olm_version' 'v0.18.3' 'Version of OLM to use' DEFINE_boolean 'olm' false 'Deploy OLM' @@ -41,8 +41,8 @@ echo "Running with: k8s_version=${k8s_version}, olm_version=${olm_version}, olm= set -em -source ${SCRIPTS_DIR}/lib/debug_functions -source ${SCRIPTS_DIR}/lib/utils +source "${SCRIPTS_DIR}/lib/debug_functions" +source "${SCRIPTS_DIR}/lib/utils" ### Functions ### @@ -56,17 +56,17 @@ function generate_cluster_yaml() { local nodes for node in ${cluster_nodes[${cluster}]}; do 
nodes="${nodes}"$'\n'"- role: $node"; done - render_template ${RESOURCES_DIR}/kind-cluster-config.yaml > ${RESOURCES_DIR}/${cluster}-config.yaml + render_template "${RESOURCES_DIR}/kind-cluster-config.yaml" > "${RESOURCES_DIR}/${cluster}-config.yaml" } function kind_fixup_config() { - local master_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${cluster}-control-plane | head -n 1) - sed -i -- "s/server: .*/server: https:\/\/$master_ip:6443/g" $KUBECONFIG - sed -i -- "s/user: kind-.*/user: ${cluster}/g" $KUBECONFIG - sed -i -- "s/name: kind-.*/name: ${cluster}/g" $KUBECONFIG - sed -i -- "s/cluster: kind-.*/cluster: ${cluster}/g" $KUBECONFIG - sed -i -- "s/current-context: .*/current-context: ${cluster}/g" $KUBECONFIG - chmod a+r $KUBECONFIG + local master_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "${cluster}-control-plane" | head -n 1) + sed -i -- "s/server: .*/server: https:\/\/$master_ip:6443/g" "$KUBECONFIG" + sed -i -- "s/user: kind-.*/user: ${cluster}/g" "$KUBECONFIG" + sed -i -- "s/name: kind-.*/name: ${cluster}/g" "$KUBECONFIG" + sed -i -- "s/cluster: kind-.*/cluster: ${cluster}/g" "$KUBECONFIG" + sed -i -- "s/current-context: .*/current-context: ${cluster}/g" "$KUBECONFIG" + chmod a+r "$KUBECONFIG" } # In development environments where clusters are brought up and down @@ -113,7 +113,7 @@ function create_kind_cluster() { if kind get clusters | grep -q "^${cluster}$"; then echo "KIND cluster already exists, skipping its creation..." 
- kind export kubeconfig --name=${cluster} + kind export kubeconfig --name="${cluster}" kind_fixup_config return fi @@ -131,15 +131,15 @@ function create_kind_cluster() { fi kind version - cat ${RESOURCES_DIR}/${cluster}-config.yaml - kind create cluster $image_flag --name=${cluster} --config=${RESOURCES_DIR}/${cluster}-config.yaml + cat "${RESOURCES_DIR}/${cluster}-config.yaml" + kind create cluster ${image_flag:+"$image_flag"} --name="${cluster}" --config="${RESOURCES_DIR}/${cluster}-config.yaml" kind_fixup_config ( deploy_cluster_capabilities; ) & if ! wait $! ; then echo "Failed to deploy cluster capabilities, removing the cluster" kubectl cluster-info dump 1>&2 - kind delete cluster --name=${cluster} + kind delete cluster --name="${cluster}" return 1 fi } @@ -164,7 +164,7 @@ function deploy_weave_cni(){ # Check if image is already present, and if not, download it. echo "Processing Image: $image" - if [ -z "`docker images -q $image`" ] ; then + if [ -z "`docker images -q "$image"`" ] ; then echo "Image $image not found, downloading..." if ! docker pull "$image"; then echo "**** 'docker pull $image' failed. Manually run. ****" @@ -177,7 +177,7 @@ function deploy_weave_cni(){ if [ "${IMAGE_FAILURE}" == false ] ; then LCL_REG_IMAGE_NAME="${image/weaveworks/localhost:5000}" # Copy image to local registry if not there - if [ -z "$(docker images -q ${LCL_REG_IMAGE_NAME})" ] ; then + if [ -z "$(docker images -q "${LCL_REG_IMAGE_NAME}")" ] ; then echo "Image ${LCL_REG_IMAGE_NAME} not found, tagging and pushing ..." if ! docker tag "$image" "${LCL_REG_IMAGE_NAME}"; then echo "'docker tag $image ${LCL_REG_IMAGE_NAME}' failed." 
@@ -229,17 +229,17 @@ function deploy_kind_ovn(){ docker tag "${OVN_SRC_IMAGE}" "${OVN_IMAGE}" docker push "${OVN_IMAGE}" - ( ./ovn-kubernetes/contrib/kind.sh -ov $OVN_IMAGE -cn ${KIND_CLUSTER_NAME} -ric -lr -dd ${KIND_CLUSTER_NAME}.local; ) & + ( ./ovn-kubernetes/contrib/kind.sh -ov "$OVN_IMAGE" -cn "${KIND_CLUSTER_NAME}" -ric -lr -dd "${KIND_CLUSTER_NAME}.local"; ) & if ! wait $! ; then echo "Failed to install kind with OVN" - kind delete cluster --name=${cluster} + kind delete cluster --name="${cluster}" return 1 fi ( deploy_cluster_capabilities; ) & if ! wait $! ; then echo "Failed to deploy cluster capabilities, removing the cluster" - kind delete cluster --name=${cluster} + kind delete cluster --name="${cluster}" return 1 fi } @@ -295,11 +295,11 @@ function deploy_prometheus() { # TODO Install in a separate namespace kubectl create ns submariner-operator # Bundle from prometheus-operator, namespace changed to submariner-operator - kubectl apply -f ${SCRIPTS_DIR}/resources/prometheus/bundle.yaml - kubectl apply -f ${SCRIPTS_DIR}/resources/prometheus/serviceaccount.yaml - kubectl apply -f ${SCRIPTS_DIR}/resources/prometheus/clusterrole.yaml - kubectl apply -f ${SCRIPTS_DIR}/resources/prometheus/clusterrolebinding.yaml - kubectl apply -f ${SCRIPTS_DIR}/resources/prometheus/prometheus.yaml + kubectl apply -f "${SCRIPTS_DIR}/resources/prometheus/bundle.yaml" + kubectl apply -f "${SCRIPTS_DIR}/resources/prometheus/serviceaccount.yaml" + kubectl apply -f "${SCRIPTS_DIR}/resources/prometheus/clusterrole.yaml" + kubectl apply -f "${SCRIPTS_DIR}/resources/prometheus/clusterrolebinding.yaml" + kubectl apply -f "${SCRIPTS_DIR}/resources/prometheus/prometheus.yaml" } function deploy_cluster_capabilities() { @@ -334,8 +334,8 @@ function download_ovnk() { ### Main ### -rm -rf ${KUBECONFIGS_DIR} -mkdir -p ${KUBECONFIGS_DIR} +rm -rf "${KUBECONFIGS_DIR}" +mkdir -p "${KUBECONFIGS_DIR}" download_kind load_settings diff --git a/scripts/shared/compile.sh 
b/scripts/shared/compile.sh index 5790dba0e..820fa4098 100755 --- a/scripts/shared/compile.sh +++ b/scripts/shared/compile.sh @@ -2,7 +2,7 @@ ## Process command line flags ## -source ${SCRIPTS_DIR}/lib/shflags +source "${SCRIPTS_DIR}/lib/shflags" DEFINE_boolean 'debug' false "Build the binary with debug information included (or stripped)" DEFINE_boolean 'upx' true "Use UPX to make the binary smaller (only when --nodebug)" DEFINE_string 'ldflags' '' "Extra flags to send to the Go compiler" @@ -23,17 +23,16 @@ source_file=$2 set -e -source ${SCRIPTS_DIR}/lib/debug_functions +source "${SCRIPTS_DIR}/lib/debug_functions" ## Main ## -mkdir -p ${binary%/*} +mkdir -p "${binary%/*}" echo "Building ${binary@Q} (ldflags: ${ldflags@Q})" if [ "$build_debug" = "false" ]; then ldflags="-s -w ${ldflags}" fi -CGO_ENABLED=0 ${GO:-go} build -trimpath -ldflags "${ldflags}" -o $binary $source_file -[[ "$build_upx" = "false" ]] || [[ "$build_debug" = "true" ]] || upx $binary - +CGO_ENABLED=0 ${GO:-go} build -trimpath -ldflags "${ldflags}" -o "$binary" "$source_file" +[[ "$build_upx" = "false" ]] || [[ "$build_debug" = "true" ]] || upx "$binary" diff --git a/scripts/shared/deploy.sh b/scripts/shared/deploy.sh index 29a61fed6..74dfb4bb2 100755 --- a/scripts/shared/deploy.sh +++ b/scripts/shared/deploy.sh @@ -2,7 +2,7 @@ ## Process command line flags ## -source ${SCRIPTS_DIR}/lib/shflags +source "${SCRIPTS_DIR}/lib/shflags" DEFINE_string 'settings' '' "Settings YAML file to customize cluster deployments" DEFINE_string 'deploytool' 'operator' 'Tool to use for deploying (operator/helm/bundle/ocm)' DEFINE_string 'deploytool_broker_args' '' 'Any extra arguments to pass to the deploytool when deploying the broker' @@ -31,12 +31,12 @@ echo "Running with: globalnet=${globalnet@Q}, deploytool=${deploytool@Q}, deploy set -em -source ${SCRIPTS_DIR}/lib/debug_functions -source ${SCRIPTS_DIR}/lib/utils -source ${SCRIPTS_DIR}/lib/deploy_funcs +source "${SCRIPTS_DIR}/lib/debug_functions" +source 
"${SCRIPTS_DIR}/lib/utils" +source "${SCRIPTS_DIR}/lib/deploy_funcs" # Source plugin if the path is passed via plugin argument and the file exists -[[ -n "${FLAGS_plugin}" ]] && [[ -f "${FLAGS_plugin}" ]] && source ${FLAGS_plugin} +[[ -n "${FLAGS_plugin}" ]] && [[ -f "${FLAGS_plugin}" ]] && source "${FLAGS_plugin}" ### Constants ### readonly CE_IPSEC_IKEPORT=500 @@ -138,14 +138,14 @@ declare_kubeconfig bash -c "curl -Ls https://get.submariner.io | VERSION=${CUTTING_EDGE} DESTDIR=/go/bin bash" || bash -c "curl -Ls https://get.submariner.io | VERSION=devel DESTDIR=/go/bin bash" -load_deploytool $deploytool +load_deploytool "$deploytool" deploytool_prereqs run_if_defined pre_deploy run_subm_clusters prepare_cluster -with_context $broker setup_broker +with_context "$broker" setup_broker install_subm_all_clusters if [ "${#cluster_subm[@]}" -gt 1 ]; then diff --git a/scripts/shared/e2e.sh b/scripts/shared/e2e.sh index c60623031..4a2719439 100755 --- a/scripts/shared/e2e.sh +++ b/scripts/shared/e2e.sh @@ -2,7 +2,7 @@ ## Process command line flags ## -source ${SCRIPTS_DIR}/lib/shflags +source "${SCRIPTS_DIR}/lib/shflags" DEFINE_string 'focus' '' "Ginkgo focus for the E2E tests" DEFINE_string 'skip' '' "Ginkgo skip for the E2E tests" DEFINE_string 'testdir' 'test/e2e' "Directory under to be used for E2E testing" @@ -27,8 +27,8 @@ context_clusters=("$@") set -em -o pipefail -source ${SCRIPTS_DIR}/lib/debug_functions -source ${SCRIPTS_DIR}/lib/utils +source "${SCRIPTS_DIR}/lib/debug_functions" +source "${SCRIPTS_DIR}/lib/utils" ### Functions ### @@ -43,7 +43,7 @@ function deploy_env_once() { } function generate_context_flags() { - for cluster in ${context_clusters[*]}; do + for cluster in "${context_clusters[@]}"; do printf " -dp-context $cluster" done } @@ -61,14 +61,14 @@ function generate_kubecontexts() { } function test_with_e2e_tests { - cd ${DAPPER_SOURCE}/${FLAGS_testdir} + cd "${DAPPER_SOURCE}/${FLAGS_testdir}" ${GO:-go} test -v -timeout 30m -args -ginkgo.v 
-ginkgo.randomizeAllSpecs -ginkgo.trace\ - -submariner-namespace $SUBM_NS $(generate_context_flags) ${globalnet} \ + -submariner-namespace $SUBM_NS $(generate_context_flags) ${globalnet:+"$globalnet"} \ -ginkgo.reportPassed -test.timeout 15m \ "${ginkgo_args[@]}" \ - -ginkgo.reportFile ${DAPPER_OUTPUT}/e2e-junit.xml 2>&1 | \ - tee ${DAPPER_OUTPUT}/e2e-tests.log + -ginkgo.reportFile "${DAPPER_OUTPUT}/e2e-junit.xml" 2>&1 | \ + tee "${DAPPER_OUTPUT}/e2e-tests.log" } function test_with_subctl { @@ -80,7 +80,7 @@ function test_with_subctl { declare_kubeconfig [[ "${lazy_deploy}" = "false" ]] || deploy_env_once -if [ -d ${DAPPER_SOURCE}/${FLAGS_testdir} ]; then +if [ -d "${DAPPER_SOURCE}/${FLAGS_testdir}" ]; then test_with_e2e_tests else test_with_subctl diff --git a/scripts/shared/entry b/scripts/shared/entry index f1717cfa6..67605ac1d 100755 --- a/scripts/shared/entry +++ b/scripts/shared/entry @@ -1,13 +1,13 @@ #!/bin/bash set -e -source ${SCRIPTS_DIR}/lib/debug_functions +source "${SCRIPTS_DIR}/lib/debug_functions" trap "chown -R $DAPPER_UID:$DAPPER_GID ." 
exit mkdir -p bin dist output -if [ -e ./scripts/$1 ]; then +if [ -e "./scripts/$1" ]; then ./scripts/"$@" else "$@" diff --git a/scripts/shared/post_mortem.sh b/scripts/shared/post_mortem.sh index a89f3f22b..c21c84f72 100755 --- a/scripts/shared/post_mortem.sh +++ b/scripts/shared/post_mortem.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash -source ${SCRIPTS_DIR}/lib/debug_functions -source ${SCRIPTS_DIR}/lib/utils +source "${SCRIPTS_DIR}/lib/debug_functions" +source "${SCRIPTS_DIR}/lib/utils" ### Functions ### @@ -18,12 +18,12 @@ function print_pods_logs() { print_section "** Pods logs for NS $namespace using selector '$selector' **" for pod in $(kubectl get pods --selector="$selector" -n "$namespace" -o jsonpath='{.items[*].metadata.name}'); do - if [ "$(kubectl get pods -n $namespace $pod -o jsonpath='{.status.containerStatuses[*].ready}')" != true ]; then + if [ "$(kubectl get pods -n "$namespace" "$pod" -o jsonpath='{.status.containerStatuses[*].ready}')" != true ]; then print_section "*** $pod (terminated) ***" - kubectl -n $namespace logs -p $pod + kubectl -n "$namespace" logs -p "$pod" else print_section "*** $pod ***" - kubectl -n $namespace logs $pod + kubectl -n "$namespace" logs "$pod" fi done } @@ -37,11 +37,11 @@ function post_analyze() { print_section "* Details of pods with statuses other than Running in $cluster *" for pod in $(kubectl get pods -A | tail -n +2 | grep -v Running | sed 's/ */;/g'); do - ns=$(echo $pod | cut -f1 -d';') - name=$(echo $pod | cut -f2 -d';') + ns=$(echo "$pod" | cut -f1 -d';') + name=$(echo "$pod" | cut -f2 -d';') print_section "** NS: $ns; Pod: $name **" - kubectl -n $ns describe pod $name - kubectl -n $ns logs $name + kubectl -n "$ns" describe pod "$name" + kubectl -n "$ns" logs "$name" done print_section "* Kube-proxy pod logs for $cluster *" diff --git a/scripts/shared/release_images.sh b/scripts/shared/release_images.sh index c810e1d22..a8916d025 100755 --- a/scripts/shared/release_images.sh +++ 
b/scripts/shared/release_images.sh @@ -2,7 +2,7 @@ ## Process command line flags ## -source ${SCRIPTS_DIR}/lib/shflags +source "${SCRIPTS_DIR}/lib/shflags" DEFINE_string 'tag' "${CUTTING_EDGE}" "Additional tag(s) to use for the image (prefix 'v' will be stripped)" DEFINE_string 'repo' 'quay.io/submariner' "Quay.io repo to deploy to" DEFINE_string 'oci' '' 'Directory containing OCI images (for multi-arch pushes)' @@ -21,7 +21,7 @@ fi set -e -source ${SCRIPTS_DIR}/lib/debug_functions +source "${SCRIPTS_DIR}/lib/debug_functions" function release_image() { local image=$1 @@ -30,9 +30,9 @@ function release_image() { local target_image="${image}:${target_tag#v}" if [[ -z "${oci}" ]]; then # Single-arch - skopeo copy docker-daemon:${repo}/${image}:${DEV_VERSION} docker://${repo}/${target_image} + skopeo copy "docker-daemon:${repo}/${image}:${DEV_VERSION}" "docker://${repo}/${target_image}" else - skopeo copy --all oci-archive:${oci}/${image}.tar docker://${repo}/${target_image} + skopeo copy --all "oci-archive:${oci}/${image}.tar" "docker://${repo}/${target_image}" fi done } @@ -40,6 +40,6 @@ function release_image() { echo "$QUAY_PASSWORD" | skopeo login quay.io -u "$QUAY_USERNAME" --password-stdin for image; do - release_image ${image} + release_image "${image}" done diff --git a/scripts/shared/reload_images.sh b/scripts/shared/reload_images.sh index 206972a7b..ace227a81 100755 --- a/scripts/shared/reload_images.sh +++ b/scripts/shared/reload_images.sh @@ -2,9 +2,9 @@ set -e -source ${SCRIPTS_DIR}/lib/utils -source ${SCRIPTS_DIR}/lib/debug_functions -source ${SCRIPTS_DIR}/lib/deploy_funcs +source "${SCRIPTS_DIR}/lib/utils" +source "${SCRIPTS_DIR}/lib/debug_functions" +source "${SCRIPTS_DIR}/lib/deploy_funcs" function find_resources() { local resource_type=$1 diff --git a/scripts/shared/targets.sh b/scripts/shared/targets.sh index 5a94dbc05..866ec79b1 100755 --- a/scripts/shared/targets.sh +++ b/scripts/shared/targets.sh @@ -9,7 +9,7 @@ print_indent Target Description | 
tee >(tr '[:alnum:]' '-') make_targets=($(make -pRrq : 2>/dev/null |\ grep -oP '^(?!Makefile.*)[-[:alnum:]]*(?=:)' | sort -u)) -for target in ${make_targets[*]}; do - description=$(grep -hoP -m1 "(?<=\[${target}\] ).*" Makefile* ${SHIPYARD_DIR}/Makefile* | head -1) +for target in "${make_targets[@]}"; do + description=$(grep -hoP -m1 "(?<=\[${target}\] ).*" Makefile* "${SHIPYARD_DIR}"/Makefile* | head -1) print_indent "${target}" "${description}" done diff --git a/scripts/shared/unit_test.sh b/scripts/shared/unit_test.sh index 8381eecc7..6fa0fbe8a 100755 --- a/scripts/shared/unit_test.sh +++ b/scripts/shared/unit_test.sh @@ -2,8 +2,8 @@ set -e -source ${SCRIPTS_DIR}/lib/debug_functions -source ${SCRIPTS_DIR}/lib/find_functions +source "${SCRIPTS_DIR}/lib/debug_functions" +source "${SCRIPTS_DIR}/lib/find_functions" echo "Looking for packages to test" @@ -12,7 +12,7 @@ modules=($(find_modules)) result=0 for module in "${modules[@]}"; do - printf "Looking for tests in module %s\n" ${module} + printf "Looking for tests in module %s\n" "${module}" excluded_modules="" for exc_module in "${modules[@]}"; do @@ -21,12 +21,12 @@ for module in "${modules[@]}"; do fi done - packages="$(cd $module; find_unit_test_dirs "$excluded_modules" "$@" | tr '\n' ' ')" + packages="$(cd "$module"; find_unit_test_dirs "$excluded_modules" "$@" | tr '\n' ' ')" if [ -n "${packages}" ]; then echo "Running tests in ${packages}" [ "${ARCH}" == "amd64" ] && race=-race - (cd $module && ${GO:-go} test -v ${race} -cover ${packages} -ginkgo.v -ginkgo.trace -ginkgo.reportPassed -ginkgo.reportFile junit.xml "$@") || result=1 + (cd "$module" && ${GO:-go} test -v ${race} -cover ${packages} -ginkgo.v -ginkgo.trace -ginkgo.reportPassed -ginkgo.reportFile junit.xml "$@") || result=1 fi done