make k8 install works in Mac (#1401)
* Add support for proxy option when host doesn't have access to internal node ports
* Add Mac as OS for CD_DEV workflow

Address #1363
sehz committed Aug 9, 2021
1 parent 7eb1237 commit f8af0c8
Showing 7 changed files with 101 additions and 40 deletions.
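
For context, the diffs below add a new --proxy-addr option to fluvio cluster start (src/cluster/src/cli/start/mod.rs) and thread it through the installer so the advertised ingress address can be one the host actually reaches, rather than a node's InternalIP. A minimal sketch of the intended usage, assuming the cluster's node ports are forwarded to the host; the address is illustrative and not taken from this commit:

# Start a k8 cluster while advertising a host-reachable proxy address instead
# of the node's InternalIP (for hosts that cannot reach internal node ports).
# 127.0.0.1 is only an example address.
fluvio cluster start --proxy-addr 127.0.0.1
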
19 changes: 13 additions & 6 deletions .github/workflows/cd_dev.yaml
@@ -27,7 +27,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
os: [ubuntu-latest,macos-latest]
cluster_type: [local,k8]
env:
SLEEP: 10
@@ -36,25 +36,32 @@

steps:
- uses: actions/checkout@v2
- uses: nolar/setup-k3d-k3s@v1
- name: Install Fluvio CLI
run: |
curl -fsS https://packages.fluvio.io/v1/install.sh | VERSION=latest bash
run: curl -fsS https://packages.fluvio.io/v1/install.sh | VERSION=latest bash
- name: Set up K3d for Ubuntu
uses: nolar/setup-k3d-k3s@v1
if: ${{ matrix.os == 'ubuntu-latest' }}
- name: Set up K8 for ubuntu(kind)
if: ${{ matrix.os == 'ubuntu-latest' }}
run: ./k8-util/cluster/reset-k3d.sh
- name: Set up Minikube for Mac
if: ${{ matrix.os == 'macos-latest' }}
run: ./k8-util/cluster/start-minikube-mac.sh
- name: Create Fluvio cluster and run E2E Test
uses: nick-invision/retry@v2
with:
timeout_minutes: 7
max_attempts: 3
command: |
./k8-util/cluster/reset-k3d.sh
export PATH=~/.fluvio/bin:$PATH
if [[ ${{ matrix.cluster_type }} == 'local' ]]; then
echo "Installing local cluster"
fluvio cluster delete
fluvio cluster start --local
else
echo "Installing k8 cluster"
fluvio cluster delete
fluvio cluster start --image-version latest
fi
sleep 30
fluvio version
15 changes: 12 additions & 3 deletions k8-util/cluster/reset-minikube.sh
@@ -3,14 +3,23 @@
# it uses docker as default driver
set -e
set -x
ARG1=${1:-docker}
K8_VERSION=${2:-1.21.2}

# set up default driver, use hyperkit for mac
if [ "$(uname)" == "Darwin" ]; then
EXTRA_CONFIG=--extra-config=apiserver.service-node-port-range=32700-32800 --ports=127.0.0.1:32700-32800:32700-32800

DRIVER=${1:-hyperkit}
echo "Using driver: $DRIVER"
# for mac, if driver is docker, set up proxy
if [ ${DRIVER} == "docker" ]; then
EXTRA_CONFIG="--extra-config=apiserver.service-node-port-range=32700-32800 --ports=127.0.0.1:32700-32800:32700-32800"
fi

else
DRIVER=${1:-docker}
fi


minikube delete
minikube start --driver $ARG1 --kubernetes-version=$K8_VERSION $EXTRA_CONFIG
minikube start --driver $DRIVER --kubernetes-version=$K8_VERSION $EXTRA_CONFIG
# minikube start --extra-config=apiserver.v=10
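
With the docker driver on macOS, the apiserver node-port range is pinned to 32700-32800 and those ports are published on the host's loopback interface, so 127.0.0.1 can serve as the proxy address. A hedged sketch of how this pairs with the new flag; the exact sequence is an assumption based on the script and option added in this commit:

# Recreate minikube on macOS with the docker driver; the --ports mapping above
# publishes node ports 32700-32800 on 127.0.0.1.
./k8-util/cluster/reset-minikube.sh docker

# Install Fluvio, advertising the published loopback address instead of the
# node's InternalIP (the address is illustrative).
fluvio cluster start --proxy-addr 127.0.0.1
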
6 changes: 6 additions & 0 deletions k8-util/cluster/start-minikube-mac.sh
@@ -0,0 +1,6 @@
#!/bin/bash
set -x
brew install minikube
minikube config set memory 16384
minikube start --driver virtualbox --kubernetes-version=1.21.2
kubectl get nodes
1 change: 1 addition & 0 deletions src/cluster/src/cli/start/k8.rs
@@ -33,6 +33,7 @@ pub async fn process_k8(
.chart_values(opt.k8_config.chart_values)
.render_checks(true)
.upgrade(upgrade)
.proxy_addr(opt.proxy_addr)
.with_if(skip_sys, |b| b.install_sys(false))
.with_if(opt.skip_checks, |b| b.skip_checks(true));

4 changes: 4 additions & 0 deletions src/cluster/src/cli/start/mod.rs
@@ -123,6 +123,10 @@ pub struct StartOpt {
/// Tries to setup necessary environment for cluster startup
#[structopt(long)]
pub setup: bool,

/// Proxy address
#[structopt(long)]
pub proxy_addr: Option<String>,
}

impl StartOpt {
89 changes: 58 additions & 31 deletions src/cluster/src/start/k8.rs
@@ -684,6 +684,9 @@ impl ClusterInstaller {
}
};

if let Some(proxy) = &self.config.proxy_addr {
println!("Using proxy addr: {}", proxy);
}
self.install_app().await?;
let namespace = &self.config.namespace;
let (address, port) = self
@@ -760,37 +763,46 @@ impl ClusterInstaller {
chart_values.push(np_conf_path.to_path_buf());

debug!("Using NodePort service type");
debug!("Getting external IP from K8s node");
let kube_client = &self.kube_client;

debug!("Trying to query for Nodes");
let external_addr = if let Some(addr) = &self.config.proxy_addr {
debug!(?addr, "use proxying");
addr.to_owned()
} else {
debug!("Getting external IP from K8s node");
let kube_client = &self.kube_client;

let nodes = kube_client.retrieve_items::<NodeSpec, _>("").await?;
debug!("Trying to query for Nodes");

debug!("Results from Node query: {:#?}", &nodes);
let nodes = kube_client.retrieve_items::<NodeSpec, _>("").await?;

let mut node_addr: Vec<NodeAddress> = Vec::new();
for n in nodes.items.into_iter().map(|x| x.status.addresses) {
node_addr.extend(n)
}
debug!("Results from Node query: {:#?}", &nodes);

debug!("Node Addresses: {:#?}", node_addr);
let mut node_addr: Vec<NodeAddress> = Vec::new();
for n in nodes.items.into_iter().map(|x| x.status.addresses) {
node_addr.extend(n)
}

let external_addr = node_addr
.into_iter()
.find(|a| a.r#type == "InternalIP")
.ok_or_else(|| K8InstallError::Other("No nodes with InternalIP set".into()))?;
debug!("Node Addresses: {:#?}", node_addr);

node_addr
.into_iter()
.find(|a| a.r#type == "InternalIP")
.ok_or_else(|| K8InstallError::Other("No nodes with InternalIP set".into()))?
.address
};

// Set this annotation w/ the external address by overriding this Helm chart value:
let mut ingress_address = BTreeMap::new();
ingress_address.insert("fluvio.io/ingress-address", external_addr.address);
ingress_address.insert("fluvio.io/ingress-address", external_addr);

let mut service_annotation = BTreeMap::new();
service_annotation.insert("serviceAnnotations", ingress_address);

let mut helm_lb_config = BTreeMap::new();
helm_lb_config.insert("loadBalancer", service_annotation);

debug!(?helm_lb_config, "helm_lb_config");

serde_yaml::to_writer(&np_addr_fd, &helm_lb_config)
.map_err(|err| K8InstallError::Other(err.to_string()))?;
Some((np_addr_fd, np_conf_path))
@@ -921,20 +933,27 @@ impl ClusterInstaller {
LoadBalancerType::NodePort => {
let node_port = node_port.ok_or_else(|| K8InstallError::Other("Expecting a NodePort port".into()))?;

debug!("k8 node query");
let nodes = self.kube_client.retrieve_items::<NodeSpec, _>(ns).await?;
debug!("Output from k8 node query: {:#?}", &nodes);
let extern_addr = if let Some(addr) = &self.config.proxy_addr {
debug!(?addr,"using proxy");
addr.to_owned()
} else {

let mut node_addr : Vec<NodeAddress> = Vec::new();
for n in nodes.items.into_iter().map(|x| x.status.addresses ) {
node_addr.extend(n)
}
debug!("k8 node query");
let nodes = self.kube_client.retrieve_items::<NodeSpec, _>(ns).await?;
debug!("Output from k8 node query: {:#?}", &nodes);

let mut node_addr : Vec<NodeAddress> = Vec::new();
for n in nodes.items.into_iter().map(|x| x.status.addresses ) {
node_addr.extend(n)
}

// Return the first node with type "InternalIP"
node_addr.into_iter().find(|a| a.r#type == "InternalIP")
.ok_or_else(|| K8InstallError::Other("No nodes with InternalIP set".into()))?.address
};

// Return the first node with type "InternalIP"
let external_addr = node_addr.into_iter().find(|a| a.r#type == "InternalIP")
.ok_or_else(|| K8InstallError::Other("No nodes with InternalIP set".into()))?;
return Ok(Some((format!("{}:{}",extern_addr,node_port),node_port)))

return Ok(Some((format!("{}:{}",external_addr.address,node_port),node_port)))
},
LoadBalancerType::LoadBalancer => {
let ingress_addr = service
@@ -1023,12 +1042,20 @@ impl ClusterInstaller {

/// Wait until the Fluvio SC public service appears in Kubernetes
async fn wait_for_sc_port_check(&self, sock_addr_str: &str) -> Result<(), K8InstallError> {
info!(sock_addr = %sock_addr_str, "waiting for SC port check");
for i in 0..*MAX_SC_NETWORK_LOOP {
let sock_addr = self.wait_for_sc_dns(sock_addr_str).await?;
if TcpStream::connect(&*sock_addr).await.is_ok() {
info!(sock_addr = %sock_addr_str, "finished SC port check");
return Ok(());
if self.config.proxy_addr.is_none() {
debug!("resolving socket addr: {}", sock_addr_str);
let sock_addr = self.wait_for_sc_dns(sock_addr_str).await?;
if TcpStream::connect(&*sock_addr).await.is_ok() {
info!(sock_addr = %sock_addr_str, "finished SC port check");
return Ok(());
}
} else {
debug!("trying to connect to proxy: {}", sock_addr_str);
if TcpStream::connect(&*sock_addr_str).await.is_ok() {
info!(sock_addr = %sock_addr_str, "finished SC port check");
return Ok(());
}
}
info!(
attempt = i,
7 changes: 7 additions & 0 deletions tests/runner/src/utils/setup/environment/k8.rs
@@ -28,8 +28,15 @@ impl TestEnvironmentDriver for K8EnvironmentDriver {
let mut builder = ClusterConfig::builder(version);
if self.option.develop_mode() {
builder.development().expect("should test in develop mode");
} else {
// check if image version is specified
if let Some(image) = &self.option.image_version {
builder.image_tag(image);
}
}

builder
.proxy_addr(self.option.proxy_addr.clone())
.spu_replicas(self.option.spu())
.skip_checks(self.option.skip_checks())
.save_profile(true);
