Skip to content

Commit

Permalink
Fix rebase error on ci-entrypoint script
Browse files Browse the repository at this point in the history
  • Loading branch information
CecileRobertMichon committed Nov 30, 2022
1 parent 9e97aea commit 4470ce1
Showing 1 changed file with 18 additions and 53 deletions.
71 changes: 18 additions & 53 deletions scripts/ci-entrypoint.sh
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,18 @@ create_cluster() {
}

install_addons() {
# Derive the CCM cluster CIDR(s) from the Cluster object when the caller has
# not provided CCM_CLUSTER_CIDR explicitly.
# Fix: quote "${KUBECTL}" like every other invocation in this script, so a
# KUBECTL value containing spaces does not word-split.
if [[ -z "${CCM_CLUSTER_CIDR:-}" ]]; then
  CCM_CLUSTER_CIDR=$("${KUBECTL}" get cluster "${CLUSTER_NAME}" -o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[0]}')
  # A second CIDR block is present on dual-stack clusters; append it if found.
  # The comma is escaped (\,) so that helm --set-string treats the value as a
  # single string instead of splitting it into a list.
  if CIDR1=$("${KUBECTL}" get cluster "${CLUSTER_NAME}" -o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[1]}' 2> /dev/null); then
    CCM_CLUSTER_CIDR="${CCM_CLUSTER_CIDR:-}\,${CIDR1}"
  fi
fi
echo "CCM cluster CIDR: ${CCM_CLUSTER_CIDR:-}"

# export the target cluster KUBECONFIG if not already set
export KUBECONFIG="${KUBECONFIG:-${PWD}/kubeconfig}"

# Copy the kubeadm configmap to the calico-system namespace. This is a workaround needed for the calico-node-windows daemonset to be able to run in the calico-system namespace.
"${KUBECTL}" create ns calico-system
"${KUBECTL}" get configmap kubeadm-config --namespace=kube-system -o yaml \
Expand Down Expand Up @@ -162,6 +174,7 @@ install_addons() {
copy_secret
fi

export CCM_LOG_VERBOSITY="${CCM_LOG_VERBOSITY:-4}"
echo "Installing cloud-provider-azure components via helm"
"${HELM}" install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name \
--set infra.clusterName="${CLUSTER_NAME}" \
Expand All @@ -174,7 +187,9 @@ install_addons() {
--set cloudControllerManager.replicas="${CCM_COUNT}" \
--set cloudControllerManager.enableDynamicReloading="${ENABLE_DYNAMIC_RELOADING}" \
--set cloudControllerManager.cloudConfig="${CLOUD_CONFIG}" \
--set cloudControllerManager.cloudConfigSecretName="${CONFIG_SECRET_NAME}"
--set cloudControllerManager.cloudConfigSecretName="${CONFIG_SECRET_NAME}" \
--set cloudControllerManager.logVerbosity="${CCM_LOG_VERBOSITY}" \
--set-string cloudControllerManager.clusterCIDR="${CCM_CLUSTER_CIDR}"
fi

echo "Waiting for all calico-system pods to be ready"
Expand Down Expand Up @@ -241,58 +256,8 @@ export ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}"
# create cluster
create_cluster

# Derive the CCM cluster CIDR(s) from the Cluster object when the caller has
# not provided CCM_CLUSTER_CIDR explicitly.
# Fix: quote "${KUBECTL}" like every other invocation in this script, so a
# KUBECTL value containing spaces does not word-split.
if [[ -z "${CCM_CLUSTER_CIDR:-}" ]]; then
  CCM_CLUSTER_CIDR=$("${KUBECTL}" get cluster "${CLUSTER_NAME}" -o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[0]}')
  # A second CIDR block is present on dual-stack clusters; append it if found.
  # The comma is escaped (\,) so that helm --set-string treats the value as a
  # single string instead of splitting it into a list.
  if CIDR1=$("${KUBECTL}" get cluster "${CLUSTER_NAME}" -o=jsonpath='{.spec.clusterNetwork.pods.cidrBlocks[1]}' 2> /dev/null); then
    CCM_CLUSTER_CIDR="${CCM_CLUSTER_CIDR:-}\,${CIDR1}"
  fi
fi
echo "CCM cluster CIDR: ${CCM_CLUSTER_CIDR:-}"

# export the target cluster KUBECONFIG if not already set
export KUBECONFIG="${KUBECONFIG:-${PWD}/kubeconfig}"

# Run wait_for_nodes (defined earlier in this script) under a 30-minute wall
# clock limit. `export -f` is required so the function definition is visible
# to the child `bash -c` process spawned by timeout; --foreground lets the
# command receive signals when the script runs interactively.
export -f wait_for_nodes
timeout --foreground 1800 bash -c wait_for_nodes

# install cloud-provider-azure components, if using out-of-tree
if [[ -n "${TEST_CCM:-}" ]]; then
  if [[ -n "${TEST_WINDOWS:-}" ]]; then
    # "app=calico" is the label only for calico-node-windows pods
    "${KUBECTL}" wait --for=condition=Ready pod -l app=calico -n kube-system --timeout=10m
  fi

  # Default: the CCM reads its cloud config from the node-local azure.json.
  CLOUD_CONFIG="/etc/kubernetes/azure.json"
  CONFIG_SECRET_NAME=""
  ENABLE_DYNAMIC_RELOADING=false
  # When requested, source the cloud config from a Kubernetes secret instead
  # and enable hot-reloading of that secret.
  if [[ -n "${LOAD_CLOUD_CONFIG_FROM_SECRET:-}" ]]; then
    CLOUD_CONFIG=""
    CONFIG_SECRET_NAME="azure-cloud-provider"
    ENABLE_DYNAMIC_RELOADING=true
    copy_secret
  fi

  export CCM_LOG_VERBOSITY="${CCM_LOG_VERBOSITY:-4}"
  echo "Installing cloud-provider-azure components via helm"
  # Collect the chart arguments in an array so the install command below
  # stays readable and each value remains a single word.
  helm_install_args=(
    --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo
    cloud-provider-azure
    --generate-name
    --set infra.clusterName="${CLUSTER_NAME}"
    --set cloudControllerManager.imageRepository="${IMAGE_REGISTRY}"
    --set cloudNodeManager.imageRepository="${IMAGE_REGISTRY}"
    --set cloudControllerManager.imageName="${CCM_IMAGE_NAME}"
    --set cloudNodeManager.imageName="${CNM_IMAGE_NAME}"
    --set-string cloudControllerManager.imageTag="${IMAGE_TAG}"
    --set-string cloudNodeManager.imageTag="${IMAGE_TAG}"
    --set cloudControllerManager.replicas="${CCM_COUNT}"
    --set cloudControllerManager.enableDynamicReloading="${ENABLE_DYNAMIC_RELOADING}"
    --set cloudControllerManager.cloudConfig="${CLOUD_CONFIG}"
    --set cloudControllerManager.cloudConfigSecretName="${CONFIG_SECRET_NAME}"
    --set cloudControllerManager.logVerbosity="${CCM_LOG_VERBOSITY}"
    --set-string cloudControllerManager.clusterCIDR="${CCM_CLUSTER_CIDR}"
  )
  "${HELM}" install "${helm_install_args[@]}"

  echo "Waiting for all kube-system pods to be ready"
  "${KUBECTL}" wait --for=condition=Ready pod -n kube-system --all --timeout=10m
fi
# install CNI and CCM
install_addons

if [[ "${#}" -gt 0 ]]; then
# disable error exit so we can run post-command cleanup
Expand Down

0 comments on commit 4470ce1

Please sign in to comment.