Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Restore CI in offline mode #3151

Merged
merged 5 commits into from
Mar 15, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions eve/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -597,6 +597,8 @@ models:
TF_VAR_rhsm_username: "%(secret:rhel_ci_login)s"
TF_VAR_rhsm_password: "%(secret:rhel_ci_password)s"
TF_VAR_debug: "%(prop:metalk8s_debug:-false)s"
TF_VAR_offline: "%(prop:offline:-true)s"
TF_VAR_use_proxy: "%(prop:use_proxy:-true)s"
MAX_RETRIES: "3"
workdir: build/eve/workers/openstack-terraform/terraform/
haltOnFailure: true
Expand Down Expand Up @@ -1641,6 +1643,17 @@ stages:
name: Set OS property to rhel-8
property: os
value: "rhel-8"
# FIXME: We disable offline mode for RHEL as it does not work for now:
# there are certificate issues when trying to reach the RedHat
# repositories through the company proxy.
- SetProperty:
name: Set offline property to false
property: offline
value: "false"
- SetProperty:
name: Set use_proxy property to false
property: use_proxy
value: "false"
- TriggerStages:
name: Trigger single-node-install with os set to rhel-8
stage_names:
Expand Down
4 changes: 4 additions & 0 deletions eve/workers/openstack-terraform/terraform/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Local Terraform working files: plugin/module cache and state.
.terraform
.terraform.tfstate*
# Generated locally during provisioning — presumably the SSH client
# config for reaching the spawned workers; confirm against the templates.
ssh_config
terraform.tfstate*
20 changes: 20 additions & 0 deletions eve/workers/openstack-terraform/terraform/common.tf
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,26 @@ variable "nodes_count" {
default = "2"
}

# Run the spawned workers without direct Internet access: when true the
# "egress" security group drops its default allow-all rules (see
# network.tf), leaving only the explicitly whitelisted egress rules.
variable "offline" {
type = bool
default = false
}

# Route outbound HTTP(S) traffic through the company proxy; also controls
# whether the proxy-setup provisioner runs on the nodes (see nodes.tf).
variable "use_proxy" {
type = bool
default = false
}

# Hostname of the HTTP proxy (resolved to IPs in network.tf, since
# security-group rules require CIDRs rather than hostnames).
variable "proxy_host" {
type = string
default = "proxy-cache"
}

# TCP port of the HTTP proxy.
variable "proxy_port" {
type = string
default = "3128"
}

resource "random_string" "current" {
length = 5
special = false
Expand Down
70 changes: 65 additions & 5 deletions eve/workers/openstack-terraform/terraform/network.tf
Original file line number Diff line number Diff line change
Expand Up @@ -63,13 +63,73 @@ resource "openstack_networking_secgroup_rule_v2" "ingress_ingress" {
security_group_id = openstack_networking_secgroup_v2.ingress.id
}

resource "openstack_networking_secgroup_rule_v2" "ingress_egress" {
resource "openstack_networking_secgroup_v2" "open_egress" {
name = "${local.prefix}-open-egress"
}

# Security group for controlled outbound traffic. In offline mode the
# provider's default (allow-all egress) rules are deleted, so only the
# explicit egress_* rules defined below apply to ports using this group.
resource "openstack_networking_secgroup_v2" "egress" {
name = "${local.prefix}-egress"
delete_default_rules = var.offline
}

resource "openstack_networking_secgroup_rule_v2" "egress_egress" {
direction = "egress"
ethertype = "IPv4"
remote_group_id = openstack_networking_secgroup_v2.ingress.id
security_group_id = openstack_networking_secgroup_v2.ingress.id
remote_group_id = openstack_networking_secgroup_v2.egress.id
security_group_id = openstack_networking_secgroup_v2.egress.id
}

resource "openstack_networking_secgroup_v2" "open_egress" {
name = "${local.prefix}-open-egress"
# Resolve the proxy hostname to its A records — security-group rules need
# IP prefixes, not hostnames. Only looked up when the proxy is in use.
data "dns_a_record_set" "proxy" {
count = var.use_proxy ? 1 : 0
host = var.proxy_host
}

# Allow outbound TCP to the proxy port on each resolved proxy IP: one
# rule per A record, so proxied traffic is the only general way out for
# ports attached to the "egress" security group.
resource "openstack_networking_secgroup_rule_v2" "egress_proxy" {
count = var.use_proxy ? length(data.dns_a_record_set.proxy[0].addrs) : 0
direction = "egress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = var.proxy_port
port_range_max = var.proxy_port
remote_ip_prefix = "${element(data.dns_a_record_set.proxy[0].addrs, count.index)}/32"
security_group_id = openstack_networking_secgroup_v2.egress.id
}

# Allow DNS queries to go out, especially because SSHd is doing
# reverse DNS on incoming IPs, otherwise it could really slow down
# connections
resource "openstack_networking_secgroup_rule_v2" "egress_dns" {
direction = "egress"
ethertype = "IPv4"
protocol = "udp"
port_range_min = 53
port_range_max = 53
remote_ip_prefix = "0.0.0.0/0"
security_group_id = openstack_networking_secgroup_v2.egress.id
}

# Used by cloud-init to retrieve SSH keys during VM deployment
resource "openstack_networking_secgroup_rule_v2" "egress_metadata" {
direction = "egress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 80
port_range_max = 80
remote_ip_prefix = "169.254.169.254/32"
security_group_id = openstack_networking_secgroup_v2.egress.id
}

# Resolve the RedHat Subscription Manager endpoint so RHEL nodes can
# register subscriptions directly (RHSM is not reached via the proxy —
# see the FIXME about certificate issues in eve/main.yml).
data "dns_a_record_set" "rhsm" {
host = "subscription.rhsm.redhat.com"
}

# Allow outbound HTTPS to each resolved RHSM address, one rule per
# A record.
resource "openstack_networking_secgroup_rule_v2" "egress_rhsm" {
count = length(data.dns_a_record_set.rhsm.addrs)
direction = "egress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 443
port_range_max = 443
remote_ip_prefix = "${element(data.dns_a_record_set.rhsm.addrs, count.index)}/32"
security_group_id = openstack_networking_secgroup_v2.egress.id
}
39 changes: 33 additions & 6 deletions eve/workers/openstack-terraform/terraform/nodes.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ resource "openstack_networking_port_v2" "bastion_public" {
network_id = data.openstack_networking_network_v2.default_network.id
security_group_ids = [
openstack_networking_secgroup_v2.ingress.id,
openstack_networking_secgroup_v2.egress.id,
openstack_networking_secgroup_v2.open_egress.id
]
}
Expand Down Expand Up @@ -67,6 +68,16 @@ resource "openstack_compute_instance_v2" "bastion" {
]
}

# Configure HTTP proxy for yum repositories.
# We also use the proxy on the bastion, even though it has access to the
# Internet, to benefit from the cache.
provisioner "remote-exec" {
inline = [
"if [ '${var.use_proxy}' = 'true' ]; then sudo chmod +x scripts/proxy-setup.sh; fi",
"if [ '${var.use_proxy}' = 'true' ]; then sudo scripts/proxy-setup.sh '${var.proxy_host}' '${var.proxy_port}'; fi"
]
}

# Install Cypress requirements
provisioner "remote-exec" {
inline = [
Expand Down Expand Up @@ -99,7 +110,7 @@ resource "openstack_networking_port_v2" "bootstrap_public" {
network_id = data.openstack_networking_network_v2.default_network.id
security_group_ids = [
openstack_networking_secgroup_v2.ingress.id,
openstack_networking_secgroup_v2.open_egress.id
openstack_networking_secgroup_v2.egress.id
]
}

Expand Down Expand Up @@ -161,6 +172,14 @@ resource "openstack_compute_instance_v2" "bootstrap" {
]
}

# Configure HTTP proxy for yum repositories
provisioner "remote-exec" {
inline = [
"if [ '${var.use_proxy}' = 'true' ]; then sudo chmod +x scripts/proxy-setup.sh; fi",
"if [ '${var.use_proxy}' = 'true' ]; then sudo scripts/proxy-setup.sh '${var.proxy_host}' '${var.proxy_port}'; fi"
]
}

# Register RHSM if OS = rhel
provisioner "remote-exec" {
inline = [
Expand All @@ -170,8 +189,8 @@ resource "openstack_compute_instance_v2" "bootstrap" {
}

provisioner "remote-exec" {
when = "destroy"
on_failure = "continue"
when = destroy
on_failure = continue
inline = [
"case '${var.os}' in rhel-*) sudo subscription-manager unregister;; esac"
]
Expand All @@ -185,7 +204,7 @@ resource "openstack_networking_port_v2" "nodes_public" {
network_id = data.openstack_networking_network_v2.default_network.id
security_group_ids = [
openstack_networking_secgroup_v2.ingress.id,
openstack_networking_secgroup_v2.open_egress.id
openstack_networking_secgroup_v2.egress.id
]
count = var.nodes_count
}
Expand Down Expand Up @@ -251,6 +270,14 @@ resource "openstack_compute_instance_v2" "nodes" {
]
}

# Configure HTTP proxy for yum repositories
provisioner "remote-exec" {
inline = [
"if [ '${var.use_proxy}' = 'true' ]; then sudo chmod +x scripts/proxy-setup.sh; fi",
"if [ '${var.use_proxy}' = 'true' ]; then sudo scripts/proxy-setup.sh '${var.proxy_host}' '${var.proxy_port}'; fi"
]
}

# Register RHSM if OS = rhel
provisioner "remote-exec" {
inline = [
Expand All @@ -260,8 +287,8 @@ resource "openstack_compute_instance_v2" "nodes" {
}

provisioner "remote-exec" {
when = "destroy"
on_failure = "continue"
when = destroy
on_failure = continue
inline = [
"case '${var.os}' in rhel-*) sudo subscription-manager unregister;; esac"
]
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#!/bin/bash

# Configure a RedHat-family host to use an HTTP proxy for yum, and install
# the Scality internal CA certificate so TLS interception by the proxy is
# trusted system-wide.
#
# Usage: proxy-setup.sh PROXY_HOST [PROXY_PORT]
#   PROXY_HOST  hostname of the HTTP proxy (no-op exit 0 if empty)
#   PROXY_PORT  TCP port of the proxy (default: 3128)

set -euo pipefail

PROXY_HOST=${1:-}
PROXY_PORT=${2:-3128}
PROXY_URL=http://$PROXY_HOST:$PROXY_PORT
PROXY_CA_URL=https://eve.devsca.com/vault/v1/release_engineering_root_CA_prod/cert/ca
PROXY_CA_PATH=/etc/pki/ca-trust/source/anchors/scality_internal_ca.crt


# Best-effort: callers may invoke us unconditionally, so an absent proxy
# host is not an error.
if ! [[ $PROXY_HOST ]]; then
    echo "No proxy host provided, exiting"
    exit
fi

if ! [ -f /etc/redhat-release ]; then
    echo "The proxy script only handles RedHat family dists."
    exit 1
fi

# Retrieve the internal CA from Vault through the proxy. Certificate
# verification is disabled (-k) because the CA is precisely what we are
# fetching and is not trusted yet. The sed expressions extract the PEM
# body from the JSON payload and restore its newlines.
# NOTE(review): assumes the response shape
# {..."certificate":"...","revocation_time":...} — confirm if the Vault
# endpoint changes.
CA_CERT=$(
    curl -skx "$PROXY_URL" -L "$PROXY_CA_URL" \
        | sed -e 's/.*"certificate":"\(.*\)","revocation_time".*/\1/' \
              -e 's/\\n/\n/g'
)

# Sanity check before touching the system trust store: an HTTP 200 with an
# unexpected body (e.g. a Vault error page) would otherwise install a
# garbled anchor and the script would still exit 0.
if ! grep -q 'BEGIN CERTIFICATE' <<< "$CA_CERT"; then
    echo "Failed to retrieve a valid CA certificate from $PROXY_CA_URL" >&2
    exit 1
fi

printf '%s\n' "$CA_CERT" > "$PROXY_CA_PATH"

update-ca-trust force-enable
update-ca-trust extract

# Point every yum repository at the proxy.
yum-config-manager --save --setopt proxy="$PROXY_URL"
19 changes: 14 additions & 5 deletions tests/post/steps/test_logging.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,13 +151,22 @@ def push_log_to_loki(k8s_client, context):
@then("we can query this example log from Loki")
def query_log_from_loki(k8s_client, context):
query = {"query": '{{identifier="{0}"}}'.format(context["test_log_id"])}
response = query_loki_api(k8s_client, query)
result_data = response[0]["data"]["result"]

assert result_data, "No test log found in Loki with identifier={}".format(
context["test_log_id"]
def _check_example_log():
response = query_loki_api(k8s_client, query)
result_data = response[0]["data"]["result"]

assert result_data, "No test log found in Loki with identifier={}".format(
context["test_log_id"]
)
assert result_data[0]["stream"]["identifier"] == context["test_log_id"]

utils.retry(
_check_example_log,
times=40,
wait=5,
name="check the example log can be retrieved",
)
assert result_data[0]["stream"]["identifier"] == context["test_log_id"]


@then("we can retrieve logs from logger pod in Loki API")
Expand Down