From ad864910500b85c384701ad7513945b3b0eef6ab Mon Sep 17 00:00:00 2001
From: Jacob Weinstock
Date: Thu, 2 Sep 2021 21:40:46 -0600
Subject: [PATCH] Remove unused code: Clean up.

This also helps ensure that there are no unneeded contributions against
these files. If the functionality in test/ is needed, it should be moved to
the sandbox repo. These are full-stack tests, not just Tink server tests.

Signed-off-by: Jacob Weinstock
---
 generate-env.sh | 95 -----
 setup.sh | 528 -------------------------
 test-docker-compose.yml | 158 --------
 test/actions/action1/hello_world.sh | 4 -
 test/actions/overwrite_data/Dockerfile | 2 -
 test/actions/update_data/Dockerfile | 5 -
 test/build_images.sh | 6 -
 test/data/hardware/hardware_1.json | 112 ------
 test/data/hardware/hardware_2.json | 112 ------
 test/data/template/sample_1 | 19 -
 test/data/template/sample_2 | 18 -
 test/data/template/sample_3 | 25 --
 test/e2e_test.go | 72 ----
 test/framework/hardware.go | 46 ---
 test/framework/setup.go | 165 --------
 test/framework/tearDown.go | 15 -
 test/framework/template.go | 41 --
 test/framework/utils.go | 29 --
 test/framework/worker.go | 187 ---------
 test/framework/workflow.go | 51 ---
 test/push_images.sh | 8 -
 test/test_wf_timeout.go | 49 ---
 test/test_wf_with_multi_workers.go | 52 ---
 test/test_wf_with_worker.go | 54 ---
 24 files changed, 1853 deletions(-)
 delete mode 100755 generate-env.sh
 delete mode 100755 setup.sh
 delete mode 100644 test-docker-compose.yml
 delete mode 100755 test/actions/action1/hello_world.sh
 delete mode 100644 test/actions/overwrite_data/Dockerfile
 delete mode 100644 test/actions/update_data/Dockerfile
 delete mode 100755 test/build_images.sh
 delete mode 100644 test/data/hardware/hardware_1.json
 delete mode 100644 test/data/hardware/hardware_2.json
 delete mode 100644 test/data/template/sample_1
 delete mode 100644 test/data/template/sample_2
 delete mode 100644 test/data/template/sample_3
 delete mode 100644 test/e2e_test.go
 delete mode 100644 test/framework/hardware.go
 delete mode 100644 test/framework/setup.go
 delete mode 100644 test/framework/tearDown.go
 delete mode 100644 test/framework/template.go
 delete mode 100644 test/framework/utils.go
 delete mode 100644 test/framework/worker.go
 delete mode 100644 test/framework/workflow.go
 delete mode 100755 test/push_images.sh
 delete mode 100644 test/test_wf_timeout.go
 delete mode 100644 test/test_wf_with_multi_workers.go
 delete mode 100644 test/test_wf_with_worker.go

diff --git a/generate-env.sh b/generate-env.sh
deleted file mode 100755
index d80e22ca8..000000000
--- a/generate-env.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env bash - -# stops the execution if a command or pipeline has an error -set -eu - -if command -v tput >/dev/null && tput setaf 1 >/dev/null 2>&1; then - # color codes - RED="$(tput setaf 1)" - RESET="$(tput sgr0)" -fi - -ERR="${RED:-}ERROR:${RESET:-}" - -err() ( - if [ -z "${1:-}" ]; then - cat >&2 - else - echo "$ERR " "$@" >&2 - fi -) - -candidate_interfaces() ( - ip -o link show | - awk -F': ' '{print $2}' | - sed 's/[ \t].*//;/^\(lo\|bond0\|\|\)$/d' | - sort -) - -validate_tinkerbell_network_interface() ( - local tink_interface=$1 - - if ! 
candidate_interfaces | grep -q "^$tink_interface$"; then - err "Invalid interface ($tink_interface) selected, must be one of:" - candidate_interfaces | err - return 1 - else - return 0 - fi -) - -generate_password() ( - head -c 12 /dev/urandom | sha256sum | cut -d' ' -f1 -) - -generate_env() ( - local tink_interface=$1 - - validate_tinkerbell_network_interface "$tink_interface" - - local tink_password - tink_password=$(generate_password) - local registry_password - registry_password=$(generate_password) - cat < .env" - exit 1 - fi - - generate_env "$1" -) - -main "$@" diff --git a/setup.sh b/setup.sh deleted file mode 100755 index f48f49f30..000000000 --- a/setup.sh +++ /dev/null @@ -1,528 +0,0 @@ -#!/usr/bin/env bash - -# stops the execution if a command or pipeline has an error -set -eu - -# Tinkerbell stack Linux setup script -# -# See https://tinkerbell.org/setup for the installation steps. - -# file to hold all environment variables -ENV_FILE=.env - -SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX) -readonly SCRATCH -function finish() ( - rm -rf "$SCRATCH" -) -trap finish EXIT - -DEPLOYDIR=$(pwd)/deploy -readonly DEPLOYDIR -readonly STATEDIR=$DEPLOYDIR/state - -if command -v tput >/dev/null && tput setaf 1 >/dev/null 2>&1; then - # color codes - RED="$(tput setaf 1)" - GREEN="$(tput setaf 2)" - YELLOW="$(tput setaf 3)" - RESET="$(tput sgr0)" -fi - -INFO="${GREEN:-}INFO:${RESET:-}" -ERR="${RED:-}ERROR:${RESET:-}" -WARN="${YELLOW:-}WARNING:${RESET:-}" -BLANK=" " -NEXT="${GREEN:-}NEXT:${RESET:-}" - -get_distribution() ( - local lsb_dist="" - # Every system that we officially support has /etc/os-release - if [ -r /etc/os-release ]; then - # shellcheck disable=SC1091 - lsb_dist="$(. /etc/os-release && echo "$ID")" - fi - # Returning an empty string here should be alright since the - # case statements don't act unless you provide an actual value - echo "$lsb_dist" | tr '[:upper:]' '[:lower:]' -) - -get_distro_version() ( - local lsb_version="0" - # Every system that we officially support has /etc/os-release - if [ -r /etc/os-release ]; then - # shellcheck disable=SC1091 - lsb_version="$(. /etc/os-release && echo "$VERSION_ID")" - fi - - echo "$lsb_version" -) - -is_network_configured() ( - # Require the provisioner interface have both the host and nginx IP - if ! ip addr show "$TINKERBELL_NETWORK_INTERFACE" | - grep -q "$TINKERBELL_HOST_IP"; then - return 1 - fi - - if ! 
ip addr show "$TINKERBELL_NETWORK_INTERFACE" | - grep -q "$TINKERBELL_NGINX_IP"; then - return 1 - fi - - return 0 -) - -identify_network_strategy() ( - local distro=$1 - local version=$2 - - case "$distro" in - ubuntu) - if jq -n --exit-status '$distro_version >= 17.10' --argjson distro_version "$version" >/dev/null 2>&1; then - echo "setup_networking_netplan" - else - echo "setup_networking_ubuntu_legacy" - fi - ;; - centos) - echo "setup_networking_centos" - ;; - *) - echo "setup_networking_manually" - ;; - esac -) - -setup_networking() ( - local distro=$1 - local version=$2 - - setup_network_forwarding - - if is_network_configured; then - echo "$INFO tinkerbell network interface is already configured" - return 0 - fi - - local strategy - strategy=$(identify_network_strategy "$distro" "$version") - - "${strategy}" "$distro" "$version" # execute the strategy - - if is_network_configured; then - echo "$INFO tinkerbell network interface configured successfully" - else - echo "$ERR tinkerbell network interface configuration failed" - fi -) - -setup_networking_manually() ( - local distro=$1 - local version=$2 - - echo "$ERR this setup script cannot configure $distro ($version)" - echo "$BLANK please read this script's source and configure it manually." - exit 1 -) - -setup_network_forwarding() ( - # enable IP forwarding for docker - if [ "$(sysctl -n net.ipv4.ip_forward)" != "1" ]; then - if [ -d /etc/sysctl.d ]; then - echo "net.ipv4.ip_forward=1" >/etc/sysctl.d/99-tinkerbell.conf - elif [ -f /etc/sysctl.conf ]; then - echo "net.ipv4.ip_forward=1" >>/etc/sysctl.conf - fi - - sysctl net.ipv4.ip_forward=1 - fi -) - -setup_networking_netplan() ( - jq -n \ - --arg interface "$TINKERBELL_NETWORK_INTERFACE" \ - --arg cidr "$TINKERBELL_CIDR" \ - --arg host_ip "$TINKERBELL_HOST_IP" \ - --arg nginx_ip "$TINKERBELL_NGINX_IP" \ - '{ - network: { - renderer: "networkd", - ethernets: { - ($interface): { - addresses: [ - "\($host_ip)/\($cidr)", - "\($nginx_ip)/\($cidr)" - ] - } - } - } -}' >"/etc/netplan/${TINKERBELL_NETWORK_INTERFACE}.yaml" - - ip link set "$TINKERBELL_NETWORK_INTERFACE" nomaster - netplan apply - echo "$INFO waiting for the network configuration to be applied by systemd-networkd" - sleep 3 -) - -setup_networking_ubuntu_legacy() ( - if [ ! -f /etc/network/interfaces ]; then - echo "$ERR file /etc/network/interfaces not found" - exit 1 - fi - - if grep -q "$TINKERBELL_NETWORK_INTERFACE" /etc/network/interfaces; then - echo "$ERR /etc/network/interfaces already has an entry for $TINKERBELL_NETWORK_INTERFACE." 
- echo "$BLANK To prevent breaking your network, please edit /etc/network/interfaces" - echo "$BLANK and configure $TINKERBELL_NETWORK_INTERFACE as follows:" - generate_iface_config - echo "" - echo "$BLANK Then run the following commands:" - echo "$BLANK ip link set $TINKERBELL_NETWORK_INTERFACE nomaster" - echo "$BLANK ifdown $TINKERBELL_NETWORK_INTERFACE:0" - echo "$BLANK ifdown $TINKERBELL_NETWORK_INTERFACE:1" - echo "$BLANK ifup $TINKERBELL_NETWORK_INTERFACE:0" - echo "$BLANK ifup $TINKERBELL_NETWORK_INTERFACE:1" - exit 1 - else - generate_iface_config >>/etc/network/interfaces - ip link set "$TINKERBELL_NETWORK_INTERFACE" nomaster - ifdown "$TINKERBELL_NETWORK_INTERFACE:0" - ifdown "$TINKERBELL_NETWORK_INTERFACE:1" - ifup "$TINKERBELL_NETWORK_INTERFACE:0" - ifup "$TINKERBELL_NETWORK_INTERFACE:1" - fi -) - -generate_iface_config() ( - cat <"$cfgfile" - - ip link set "$TINKERBELL_NETWORK_INTERFACE" nomaster - ifup "$TINKERBELL_NETWORK_INTERFACE" -) - -setup_osie() ( - mkdir -p "$STATEDIR/webroot" - - local osie_current=$STATEDIR/webroot/misc/osie/current - local tink_workflow=$STATEDIR/webroot/workflow/ - if [ ! -d "$osie_current" ] || [ ! -d "$tink_workflow" ]; then - mkdir -p "$osie_current" - mkdir -p "$tink_workflow" - pushd "$SCRATCH" - - if [ -z "${TB_OSIE_TAR:-}" ]; then - local OSIE_DOWNLOAD_LINK=${OSIE_DOWNLOAD_LINK:-"https://tinkerbell-oss.s3.amazonaws.com/osie-uploads/latest.tar.gz"} # If variable not set or null, use default. - curl -fsSL "${OSIE_DOWNLOAD_LINK}" -o ./osie.tar.gz - tar -zxf osie.tar.gz - else - if [ ! -f "$TB_OSIE_TAR" ]; then - echo "$ERR osie tar not found in the given location $TB_OSIE_TAR" - exit 1 - fi - echo "$INFO extracting osie tar" - tar -zxf "$TB_OSIE_TAR" - fi - - if pushd osie*/; then - if mv workflow-helper.sh workflow-helper-rc "$tink_workflow"; then - cp -r ./* "$osie_current" - else - echo "$ERR failed to move 'workflow-helper.sh' and 'workflow-helper-rc'" - exit 1 - fi - popd - fi - else - echo "$INFO found existing osie files, skipping osie setup" - fi -) - -check_container_status() ( - local container_name="$1" - local container_id - container_id=$(docker-compose -f "$DEPLOYDIR/docker-compose.yml" ps -q "$container_name") - - local start_moment - local current_status - start_moment=$(docker inspect "${container_id}" --format '{{ .State.StartedAt }}') - current_status=$(docker inspect "${container_id}" --format '{{ .State.Health.Status }}') - - case "$current_status" in - starting) - : # move on to the events check - ;; - healthy) - return 0 - ;; - unhealthy) - echo "$ERR $container_name is already running but not healthy. status: $current_status" - exit 1 - ;; - *) - echo "$ERR $container_name is already running but its state is a mystery. status: $current_status" - exit 1 - ;; - esac - - local status - read -r status < <(docker events \ - --since "$start_moment" \ - --filter "container=$container_id" \ - --filter "event=health_status" \ - --format '{{.Status}}') - - if [ "$status" != "health_status: healthy" ]; then - echo "$ERR $container_name is not healthy. status: $status" - exit 1 - fi -) - -generate_certificates() ( - mkdir -p "$STATEDIR/certs" - - if [ ! -f "$STATEDIR/certs/ca.json" ]; then - jq \ - '. - | .names[0].L = $facility - ' \ - "$DEPLOYDIR/tls/ca.in.json" \ - --arg ip "$TINKERBELL_HOST_IP" \ - --arg facility "$FACILITY" \ - >"$STATEDIR/certs/ca.json" - fi - - if [ ! -f "$STATEDIR/certs/server-csr.json" ]; then - jq \ - '. 
- | .hosts += [ $ip, "tinkerbell.\($facility).packet.net" ] - | .names[0].L = $facility - | .hosts = (.hosts | sort | unique) - ' \ - "$DEPLOYDIR/tls/server-csr.in.json" \ - --arg ip "$TINKERBELL_HOST_IP" \ - --arg facility "$FACILITY" \ - >"$STATEDIR/certs/server-csr.json" - fi - - docker build --tag "tinkerbell-certs" "$DEPLOYDIR/tls" - docker run --rm \ - --volume "$STATEDIR/certs:/certs" \ - --user "$UID:$(id -g)" \ - tinkerbell-certs - - local certs_dir="/etc/docker/certs.d/$TINKERBELL_HOST_IP" - - # copy public key to NGINX for workers - if ! cmp --quiet "$STATEDIR"/certs/ca.pem "$STATEDIR/webroot/workflow/ca.pem"; then - cp "$STATEDIR"/certs/ca.pem "$STATEDIR/webroot/workflow/ca.pem" - fi - - # update host to trust registry certificate - if ! cmp --quiet "$STATEDIR/certs/ca.pem" "$certs_dir/tinkerbell.crt"; then - if [ ! -d "$certs_dir/tinkerbell.crt" ]; then - # The user will be told to create the directory - # in the next block, if copying the certs there - # fails. - mkdir -p "$certs_dir" || true >/dev/null 2>&1 - fi - if ! cp "$STATEDIR/certs/ca.pem" "$certs_dir/tinkerbell.crt"; then - echo "$ERR please copy $STATEDIR/certs/ca.pem to $certs_dir/tinkerbell.crt" - echo "$BLANK and run $0 again:" - - if [ ! -d "$certs_dir" ]; then - echo "sudo mkdir -p '$certs_dir'" - fi - echo "sudo cp '$STATEDIR/certs/ca.pem' '$certs_dir/tinkerbell.crt'" - - exit 1 - fi - fi -) - -docker_login() ( - echo -n "$TINKERBELL_REGISTRY_PASSWORD" | docker login -u="$TINKERBELL_REGISTRY_USERNAME" --password-stdin "$TINKERBELL_HOST_IP" -) - -# This function takes an image specified as first parameter and it tags and -# push it using the second one. useful to proxy images from a repository to -# another. -docker_mirror_image() ( - local from=$1 - local to=$2 - - docker pull "$from" - docker tag "$from" "$to" - docker push "$to" -) - -start_registry() ( - docker-compose -f "$DEPLOYDIR/docker-compose.yml" up --build -d registry - check_container_status "registry" -) - -# This function supposes that the registry is up and running. -# It configures with the required dependencies. -bootstrap_docker_registry() ( - docker_login - - docker_mirror_image "quay.io/tinkerbell/tink-worker:latest" "${TINKERBELL_HOST_IP}/tink-worker:latest" -) - -setup_docker_registry() ( - local registry_images="$STATEDIR/registry" - if [ ! 
-d "$registry_images" ]; then - mkdir -p "$registry_images" - fi - start_registry - bootstrap_docker_registry -) - -start_components() ( - local components=(db hegel tink-server boots tink-cli nginx) - for comp in "${components[@]}"; do - docker-compose -f "$DEPLOYDIR/docker-compose.yml" up --build -d "$comp" - sleep 3 - check_container_status "$comp" - done -) - -command_exists() ( - command -v "$@" >/dev/null 2>&1 -) - -check_command() ( - if command_exists "$1"; then - echo "$BLANK Found prerequisite: $1" - return 0 - else - echo "$ERR Prerequisite command not installed: $1" - return 1 - fi -) - -check_prerequisites() ( - distro=$1 - version=$2 - - echo "$INFO verifying prerequisites for $distro ($version)" - failed=0 - check_command docker || failed=1 - check_command docker-compose || failed=1 - check_command ip || failed=1 - check_command jq || failed=1 - - strategy=$(identify_network_strategy "$distro" "$version") - case "$strategy" in - "setup_networking_netplan") - check_command netplan || failed=1 - ;; - "setup_networking_ubuntu_legacy") - check_command ifdown || failed=1 - check_command ifup || failed=1 - ;; - "setup_networking_centos") - check_command ifdown || failed=1 - check_command ifup || failed=1 - ;; - "setup_networking_manually") - echo "$WARN this script cannot automatically configure your network." - ;; - *) - echo "$ERR bug: unhandled network strategy: $strategy" - exit 1 - ;; - esac - - if [ $failed -eq 1 ]; then - echo "$ERR Prerequisites not met. Please install the missing commands and re-run $0." - exit 1 - fi -) - -whats_next() ( - echo "$NEXT 1. Enter /vagrant/deploy and run: source ../.env; docker-compose up -d" - echo "$BLANK 2. Try executing your first workflow." - echo "$BLANK Follow the steps described in https://tinkerbell.org/examples/hello-world/ to say 'Hello World!' with a workflow." -) - -setup_nat() ( - iptables -A FORWARD -i eth1 -o eth0 -j ACCEPT - iptables -A FORWARD -i eth0 -o eth1 -m state --state ESTABLISHED,RELATED -j ACCEPT - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE -) - -do_setup() ( - # perform some very rudimentary platform detection - lsb_dist=$(get_distribution) - lsb_version=$(get_distro_version) - - echo "$INFO starting tinkerbell stack setup" - check_prerequisites "$lsb_dist" "$lsb_version" - - if [ ! -f "$ENV_FILE" ]; then - echo "$ERR Run './generate-env.sh network-interface > \"$ENV_FILE\"' before continuing." - exit 1 - fi - - # shellcheck disable=SC1090 - source "$ENV_FILE" - - setup_networking "$lsb_dist" "$lsb_version" - setup_nat - setup_osie - generate_certificates - setup_docker_registry - - echo "$INFO tinkerbell stack setup completed successfully on $lsb_dist server" - whats_next -) - -# wrapped up in a function so that we have some protection against only getting -# half the file during "curl | sh" -do_setup diff --git a/test-docker-compose.yml b/test-docker-compose.yml deleted file mode 100644 index e31c410d0..000000000 --- a/test-docker-compose.yml +++ /dev/null @@ -1,158 +0,0 @@ -version: "2.1" -services: - certs: - build: tls - volumes: - - ./certs:/certs - - tinkerbell: - build: . 
- environment: - FACILITY: ${FACILITY:-lab1} - PACKET_ENV: ${PACKET_ENV:-testing} - PACKET_VERSION: ${PACKET_VERSION:-5efab5ef3a42cb88f2d54f4ed3201c2dd6797b7d} - ROLLBAR_TOKEN: ${ROLLBAR_TOKEN:-9b78d0ad01d1467aa92c49c3a349b79d} - ROLLBAR_DISABLE: ${ROLLBAR_DISABLE:-0} - PGDATABASE: tinkerbell - PGHOST: db - PGPASSWORD: tinkerbell - PGPORT: 5432 - PGSSLMODE: disable - PGUSER: tinkerbell - depends_on: - certs: - condition: service_started - fluentbit: - condition: service_started - db: - condition: service_healthy - healthcheck: - test: ["CMD-SHELL", "wget -qO- 127.0.0.1:42114/cert"] - interval: 5s - timeout: 2s - retries: 30 - volumes: - - ./certs:/certs/${FACILITY} - #logging: - #driver: fluentd - #options: - #tag: tinkerbell - ports: - - 42113:42113/tcp - - 42114:42114/tcp - - db: - build: - context: deploy - environment: - POSTGRES_DB: tinkerbell - POSTGRES_PASSWORD: tinkerbell - POSTGRES_USER: tinkerbell - ports: - - 5432:5432 - depends_on: - - "fluentbit" - healthcheck: - test: ["CMD-SHELL", "pg_isready -U tinkerbell"] - #test: ["CMD-SHELL","psql -U tinkerbell -c \"select COUNT(*) from hardware;\""] - interval: 1s - timeout: 1s - retries: 30 - logging: - driver: fluentd - options: - tag: db - - registry: - build: - context: registry - args: - REGISTRY_USERNAME: username - REGISTRY_PASSWORD: password - environment: - REGISTRY_HTTP_ADDR: localhost:443 - REGISTRY_HTTP_TLS_CERTIFICATE: /certs/server.pem - REGISTRY_HTTP_TLS_KEY: /certs/server-key.pem - REGISTRY_AUTH: htpasswd - REGISTRY_AUTH_HTPASSWD_REALM: "Registry Realm" - REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd - volumes: - - ./certs:/certs - depends_on: - - fluentbit - logging: - driver: fluentd - options: - tag: registry - network_mode: host - - boots: - build: - context: ../boots - network_mode: host - command: -dhcp-addr 0.0.0.0:67 -tftp-addr 127.0.0.1:69 -http-addr 127.0.0.1:8080 -log-level DEBUG - environment: - API_AUTH_TOKEN: ${PACKET_API_AUTH_TOKEN:-PcyR6MvHb7wMmyYf9p8dJ2Dvnb9HxX8E} - API_CONSUMER_TOKEN: ${PACKET_CONSUMER_TOKEN:-djR2TAvbnkY92i8Ea2KFMZW6MusW1fk7qzeCUHgtnQRSsXnqxoCr6V2vhSxpqASf} - FACILITY_CODE: ${FACILITY:-lab1} - PACKET_ENV: ${PACKET_ENV:-testing} - PACKET_VERSION: ${PACKET_VERSION:-5efab5ef3a42cb88f2d54f4ed3201c2dd6797b7d} - ROLLBAR_TOKEN: ${ROLLBAR_TOKEN:-9b78d0ad01d1467aa92c49c3a349b79d} - ROLLBAR_DISABLE: ${ROLLBAR_DISABLE:-0} - MIRROR_HOST: ${MIRROR_HOST:-127.0.0.1} - DNS_SERVERS: 8.8.8.8 - PUBLIC_IP: 127.0.0.1 - BOOTP_BIND: 127.0.0.1:67 - HTTP_BIND: 127.0.0.1:80 - SYSLOG_BIND: 127.0.0.1:514 - TFTP_BIND: 127.0.0.1:69 - DOCKER_REGISTRY: 127.0.0.1 - REGISTRY_USERNAME: username - REGISTRY_PASSWORD: password - TINKERBELL_GRPC_AUTHORITY: 127.0.0.1:42113 - TINKERBELL_CERT_URL: http://127.0.0.1:42114/cert - ELASTIC_SEARCH_URL: 127.0.0.1:9200 - depends_on: - db: - condition: service_healthy - tinkerbell: - condition: service_healthy - fluentbit: - condition: service_started - logging: - driver: fluentd - options: - tag: boots - ports: - - 127.0.0.1:80:80/tcp - - 67:67/udp - - 69:69/udp - - elasticsearch: - image: elasticsearch:7.3.0 - ports: - - 9200:9200 - - 9300:9300 - environment: - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - discovery.type=single-node - - kibana: - image: kibana:7.3.0 - depends_on: - - elasticsearch - restart: always - environment: - ELASTICSEARCH_URL: http://elasticsearch:9200 - ports: - - 5601:5601 - - fluentbit: - image: fluent/fluent-bit:1.3 - ports: - - 24224:24224 - - 24224:24224/udp - depends_on: - - kibana - volumes: - - ./fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf diff --git 
a/test/actions/action1/hello_world.sh b/test/actions/action1/hello_world.sh deleted file mode 100755 index c0cddeaf3..000000000 --- a/test/actions/action1/hello_world.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -echo "This is Action1" -echo "This is again Action1" diff --git a/test/actions/overwrite_data/Dockerfile b/test/actions/overwrite_data/Dockerfile deleted file mode 100644 index 5ba04d78c..000000000 --- a/test/actions/overwrite_data/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM bash -CMD echo '{"action_02": "data_02"}' > /workflow/data diff --git a/test/actions/update_data/Dockerfile b/test/actions/update_data/Dockerfile deleted file mode 100644 index 9098c195e..000000000 --- a/test/actions/update_data/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM bash -ADD hello_world.sh /bin/hello_world.sh -RUN chmod +x /bin/hello_world.sh -#CMD echo '{"action_01": "data_01"}' > /workflow/data -ENTRYPOINT [ "/bin/hello_world.sh" ] diff --git a/test/build_images.sh b/test/build_images.sh deleted file mode 100755 index 17ffb1d5e..000000000 --- a/test/build_images.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -docker pull bash -docker tag bash:latest localhost/bash -docker build -t localhost/update-data actions/update_data/ -docker build -t localhost/overwrite-data actions/overwrite_data/ diff --git a/test/data/hardware/hardware_1.json b/test/data/hardware/hardware_1.json deleted file mode 100644 index 38ef714c9..000000000 --- a/test/data/hardware/hardware_1.json +++ /dev/null @@ -1,112 +0,0 @@ -{ - "allow_pxe": true, - "arch": "x86_64", - "bonding_mode": 4, - "efi_boot": true, - "facility_code": "ewr1", - "id": "f9f56dff-098a-4c5f-a51c-19ad35de85d1", - "instance": { - "allow_pxe": true, - "always_pxe": true, - "customdata": {}, - "hostname": "sample-machine", - "id": "3b2cccf1-1906-4d82-9fe0-c6d5188e70eb", - "ip_addresses": [], - "ipxe_script_url": "https://boot.netboot.xyz", - "network_ready": false, - "operating_system_version": { - "distro": "custom_ipxe", - "image_tag": null, - "os_slug": "custom_ipxe", - "slug": "custom_ipxe", - "version": "1" - }, - "project": {}, - "rescue": false, - "ssh_keys": [], - "state": "active", - "storage": {}, - "tags": [], - "userdata": "" - }, - "ip_addresses": [ - { - "address": "172.24.7.50", - "address_family": 4, - "cidr": 30, - "enabled": true, - "gateway": "172.24.7.49", - "management": true, - "netmask": "255.255.255.252", - "network": "172.24.7.48", - "public": false, - "type": "data" - }, - { - "address": "10.250.42.38", - "gateway": "10.250.42.1", - "netmask": "255.255.255.0", - "type": "ipmi" - } - ], - "management": { - "address": "10.250.42.38", - "gateway": "10.250.42.1", - "netmask": "255.255.255.0", - "type": "ipmi" - }, - "manufacturer": { - "id": "5c6dc6d1-0b75-4e77-811e-e2d3b4e5312a", - "slug": "dell" - }, - "network_ports": [ - { - "connected_ports": [ - { - "data": { - "bond": null, - "mac": null - }, - "hostname": "", - "id": "421f34e5-b141-4c4a-8234-239996bf7a1a", - "name": "xe-0/0/28", - "type": "data" - } - ], - "data": { - "bond": null, - "mac": "98:03:9b:89:d7:ba" - }, - "id": "213394d4-8642-443d-9140-c625cfaeb354", - "name": "eth0", - "type": "data" - }, - { - "connected_ports": [], - "data": { - "bond": null, - "mac": "98:03:9b:89:d7:bb" - }, - "id": "da245720-aa41-4368-9eca-7fb5af8bd492", - "name": "eth1", - "type": "data" - }, - { - "connected_ports": [], - "data": { - "bond": null, - "mac": "4c:d9:8f:42:21:f3" - }, - "id": "b1b7c29c-41d0-479d-9dbc-392250b8b36f", - "name": "ipmi0", - "type": "ipmi" - } - ], - "plan_slug": 
"c2.medium.x86", - "plan_version_slug": "c2.medium.x86.01", - "preinstalled_operating_system_version": {}, - "services": {}, - "state": "in_use", - "type": "server", - "vlan_id": null -} diff --git a/test/data/hardware/hardware_2.json b/test/data/hardware/hardware_2.json deleted file mode 100644 index fb85ab269..000000000 --- a/test/data/hardware/hardware_2.json +++ /dev/null @@ -1,112 +0,0 @@ -{ - "allow_pxe": true, - "arch": "x86_64", - "bonding_mode": 4, - "efi_boot": true, - "facility_code": "ewr1", - "id": "f9f56dff-098a-4c5f-a51c-19ad35de85d2", - "instance": { - "allow_pxe": true, - "always_pxe": true, - "customdata": {}, - "hostname": "sample-machine2", - "id": "3b2cccf1-1906-4d82-9fe0-c6d5188e70ec", - "ip_addresses": [], - "ipxe_script_url": "https://boot.netboot.xyz", - "network_ready": false, - "operating_system_version": { - "distro": "custom_ipxe", - "image_tag": null, - "os_slug": "custom_ipxe", - "slug": "custom_ipxe", - "version": "1" - }, - "project": {}, - "rescue": false, - "ssh_keys": [], - "state": "active", - "storage": {}, - "tags": [], - "userdata": "" - }, - "ip_addresses": [ - { - "address": "172.24.7.51", - "address_family": 4, - "cidr": 30, - "enabled": true, - "gateway": "172.24.7.49", - "management": true, - "netmask": "255.255.255.252", - "network": "172.24.7.48", - "public": false, - "type": "data" - }, - { - "address": "10.250.42.39", - "gateway": "10.250.42.1", - "netmask": "255.255.255.0", - "type": "ipmi" - } - ], - "management": { - "address": "10.250.42.39", - "gateway": "10.250.42.1", - "netmask": "255.255.255.0", - "type": "ipmi" - }, - "manufacturer": { - "id": "5c6dc6d1-0b75-4e77-811e-e2d3b4e5312b", - "slug": "dell" - }, - "network_ports": [ - { - "connected_ports": [ - { - "data": { - "bond": null, - "mac": null - }, - "hostname": "", - "id": "421f34e5-b141-4c4a-8234-239996bf7a1b", - "name": "xe-0/0/28", - "type": "data" - } - ], - "data": { - "bond": null, - "mac": "98:03:9b:89:d7:da" - }, - "id": "213394d4-8642-443d-9140-c625cfaeb354", - "name": "eth0", - "type": "data" - }, - { - "connected_ports": [], - "data": { - "bond": null, - "mac": "98:03:9b:89:d7:db" - }, - "id": "da245720-aa41-4368-9eca-7fb5af8bd493", - "name": "eth1", - "type": "data" - }, - { - "connected_ports": [], - "data": { - "bond": null, - "mac": "4c:d9:8f:42:21:f4" - }, - "id": "b1b7c29c-41d0-479d-9dbc-392250b8b37f", - "name": "ipmi0", - "type": "ipmi" - } - ], - "plan_slug": "c2.medium.x86", - "plan_version_slug": "c2.medium.x86.01", - "preinstalled_operating_system_version": {}, - "services": {}, - "state": "in_use", - "type": "server", - "vlan_id": null -} diff --git a/test/data/template/sample_1 b/test/data/template/sample_1 deleted file mode 100644 index b0c3b1fcd..000000000 --- a/test/data/template/sample_1 +++ /dev/null @@ -1,19 +0,0 @@ -version: '0.1' -name: packet_osie_provision -global_timeout: 600 -tasks: -- name: "run_one_worker" - worker: "{{.device_1}}" - environment: - MIRROR_HOST: 192.168.1.2 - actions: - - name: "server_partitioning" - image: update-data - timeout: 60 - environment: - NGINX_HOST: 192.168.1.2 - - name: "update_db" - image: update-data - timeout: 50 - environment: - MIRROR_HOST: 192.168.1.3 diff --git a/test/data/template/sample_2 b/test/data/template/sample_2 deleted file mode 100644 index 0972b7b0b..000000000 --- a/test/data/template/sample_2 +++ /dev/null @@ -1,18 +0,0 @@ -version: '0.1' -name: packet_osie_provision -global_timeout: 600 -tasks: -- name: "timeout-task" - worker: "{{.device_1}}" - actions: - - name: "update-data" - image: 
update-data - timeout: 10 - on-timeout: ["echo", "Timeout"] - on-failure: ["echo", "Failure"] - - name: "sleep-till-timeout" - image: bash - command: ["sleep", "20"] - timeout: 6 - on-timeout: ["echo", "Timeout"] - on-failure: ["echo", "Failure"] diff --git a/test/data/template/sample_3 b/test/data/template/sample_3 deleted file mode 100644 index c280d0a52..000000000 --- a/test/data/template/sample_3 +++ /dev/null @@ -1,25 +0,0 @@ -version: '0.1' -name: packet_osie_provision -global_timeout: 600 -tasks: -- name: "run-first-worker" - worker: "{{.device_1}}" - actions: - - name: "update-data" - image: update-data - timeout: 10 - on-timeout: ["echo", "Timeout"] - on-failure: ["echo", "Failure"] - - name: "overwrite-data" - image: overwrite-data - timeout: 10 - on-timeout: ["echo", "Timeout"] - on-failure: ["echo", "Failure"] -- name: "run-second-worker" - worker: "{{.device_2}}" - actions: - - name: "overwrite-again" - image: update-data - timeout: 10 - on-timeout: ["echo", "Timeout"] - on-failure: ["echo", "Failure"] diff --git a/test/e2e_test.go b/test/e2e_test.go deleted file mode 100644 index 680dea12d..000000000 --- a/test/e2e_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package e2e - -//import ( -// "os" -// "time" -// "testing" -// -// "github.com/tinkerbell/tink/client" -// "github.com/tinkerbell/tink/protos/workflow" -// "github.com/tinkerbell/tink/test/framework" -// "github.com/sirupsen/logrus" -//) -// -//var log *logrus.Logger = framework.Log -// -//func TestMain(m *testing.M) { -// log.Infoln("########Creating Setup########") -// time.Sleep(10 * time.Second) -// err := framework.StartStack() -// if err != nil { -// os.Exit(1) -// } -// os.Setenv("TINKERBELL_GRPC_AUTHORITY", "127.0.0.1:42113") -// os.Setenv("TINKERBELL_CERT_URL", "http://127.0.0.1:42114/cert") -// client.Setup() -// log.Infoln("########Setup Created########") -// -// log.Infoln("Creating hardware inventory") -// //push hardware data into hardware table -// hwData := []string{"hardware_1.json", "hardware_2.json"} -// err = framework.PushHardwareData(hwData) -// if err != nil { -// log.Errorln("Failed to push hardware inventory : ", err) -// os.Exit(2) -// } -// log.Infoln("Hardware inventory created") -// -// log.Infoln("########Starting Tests########") -// status := m.Run() -// log.Infoln("########Finished Tests########") -// log.Infoln("########Removing setup########") -// //err = framework.TearDown() -// if err != nil { -// os.Exit(3) -// } -// log.Infoln("########Setup removed########") -// os.Exit(status) -//} -// -//var testCases = map[string]struct { -// hardware string -// template string -// workers int64 -// expected workflow.ActionState -// ephData string -//}{ -// "testWfWithWorker": {"hardware_1.json", "sample_1", 1, workflow.ActionState_ACTION_STATE_SUCCESS, `{"action_02": "data_02"}`}, -// "testWfTimeout": {"hardware_1.json", "sample_2", 1, workflow.ActionState_ACTION_STATE_TIMEOUT, `{"action_01": "data_01"}`}, -// //"testWfWithMultiWorkers": {"hardware_1.json", "sample_3", 2, workflow.ActionState_ACTION_STATE_SUCCESS, `{"action_01": "data_01"}`}, -//} -// -//var runTestMap = map[string]func(t *testing.T){ -// "testWfWithWorker": TestWfWithWorker, -// "testWfTimeout": TestWfTimeout, -// //"testWfWithMultiWorkers": TestWfWithMultiWorkers, -//} -// -//func TestE2E(t *testing.T) { -// for key, val := range runTestMap { -// t.Run(key, val) -// } -//} diff --git a/test/framework/hardware.go b/test/framework/hardware.go deleted file mode 100644 index dcc006295..000000000 --- 
a/test/framework/hardware.go +++ /dev/null @@ -1,46 +0,0 @@ -package framework - -import ( - "context" - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - - "github.com/tinkerbell/tink/client" - "github.com/tinkerbell/tink/protos/hardware" -) - -func readHwData(file string) ([]byte, error) { - f, err := os.Open(filepath.Clean(file)) - if err != nil { - return []byte(""), err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return []byte(""), err - } - return data, nil -} - -// PushHardwareData : push hardware data -func PushHardwareData(hwDataFiles []string) error { - for _, hwFile := range hwDataFiles { - filepath := "data/hardware/" + hwFile - data, err := readHwData(filepath) - if err != nil { - return err - } - hw := hardware.Hardware{} - if err := json.Unmarshal(data, &hw); err != nil { - return err - } - _, err = client.HardwareClient.Push(context.Background(), &hardware.PushRequest{Data: &hw}) - if err != nil { - return err - } - } - return nil -} diff --git a/test/framework/setup.go b/test/framework/setup.go deleted file mode 100644 index a8a9cbc7f..000000000 --- a/test/framework/setup.go +++ /dev/null @@ -1,165 +0,0 @@ -package framework - -import ( - "os" - "os/exec" - "strings" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func buildCerts(filepath string) error { - cmd := exec.Command("/bin/sh", "-c", "docker-compose -f "+filepath+" up --build certs") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - return err -} - -func buildLocalDockerRegistry(filepath string) error { - cmd := exec.Command("/bin/sh", "-c", "docker-compose -f "+filepath+" up --build -d registry") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - return err -} - -func buildActionImages() error { - cmd := exec.Command("/bin/sh", "-c", "./build_images.sh") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - return err -} - -func pushImages() error { - cmd := exec.Command("/bin/sh", "-c", "./push_images.sh") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - return err -} - -func startDb(filepath string) error { - cmd := exec.Command("/bin/sh", "-c", "docker-compose -f "+filepath+" up --build -d db") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - return err -} - -func removeWorkerImage() error { - cmd := exec.Command("/bin/sh", "-c", "docker image rm worker") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - return err - -} - -func createWorkerImage() error { - cmd := exec.Command("/bin/sh", "-c", "docker build -t worker ../worker/") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - if err != nil { - logger.Errorln("Failed to create worker image", err) - } - logger.Infoln("Worker Image created") - return err -} - -func initializeLogger() { - level := os.Getenv("TEST_LOG_LEVEL") - if level != "" { - switch strings.ToLower(level) { - case "panic": - logger.SetLevel(logrus.PanicLevel) - case "fatal": - logger.SetLevel(logrus.FatalLevel) - case "error": - logger.SetLevel(logrus.ErrorLevel) - case "warn", "warning": - logger.SetLevel(logrus.WarnLevel) - case "info": - logger.SetLevel(logrus.InfoLevel) - case "debug": - logger.SetLevel(logrus.DebugLevel) - case "trace": - logger.SetLevel(logrus.TraceLevel) - default: - logger.SetLevel(logrus.InfoLevel) - logger.Errorln("Invalid value for TEST_LOG_LEVEL ", level, " .Setting it to default(Info)") - } - } else { - 
logger.SetLevel(logrus.InfoLevel) - logger.Errorln("Variable TEST_LOG_LEVEL is not set. Default is Info.") - } - logger.SetFormatter(&logrus.JSONFormatter{}) -} - -// StartStack : Starting stack -func StartStack() error { - // Docker compose file for starting the containers - filepath := "../test-docker-compose.yml" - - // Initialize logger - initializeLogger() - - // Start Db and logging components - err := startDb(filepath) - if err != nil { - return err - } - - // Building certs - err = buildCerts(filepath) - if err != nil { - return err - } - - // Building registry - err = buildLocalDockerRegistry(filepath) - if err != nil { - return err - } - - //Build default images - err = buildActionImages() - if err != nil { - return err - } - - //Push Images into registry - err = pushImages() - if err != nil { - return err - } - - //Remove older worker image - err = removeWorkerImage() - if err != nil { - return err - } - - //Create new Worker image locally - err = createWorkerImage() - if err != nil { - logger.Errorln("failed to create worker Image") - return errors.Wrap(err, "worker image creation failed") - } - - initializeLogger() - - // Start other containers - cmd := exec.Command("/bin/sh", "-c", "docker-compose -f "+filepath+" up --build -d") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - if err != nil { - logger.Errorln("failed to create worker Image") - return errors.Wrap(err, "worker image creation failed") - } - return nil -} diff --git a/test/framework/tearDown.go b/test/framework/tearDown.go deleted file mode 100644 index 8132d98c9..000000000 --- a/test/framework/tearDown.go +++ /dev/null @@ -1,15 +0,0 @@ -package framework - -import ( - "os" - "os/exec" -) - -// TearDown : remove the setup -func TearDown() error { - cmd := exec.Command("/bin/sh", "-c", "docker-compose rm -svf") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - return err -} diff --git a/test/framework/template.go b/test/framework/template.go deleted file mode 100644 index d1c9d3343..000000000 --- a/test/framework/template.go +++ /dev/null @@ -1,41 +0,0 @@ -package framework - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - - "github.com/tinkerbell/tink/client" - "github.com/tinkerbell/tink/protos/template" -) - -func readTemplateData(file string) (string, error) { - f, err := os.Open(filepath.Clean(file)) - if err != nil { - return "", err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return "", err - } - return string(data), nil -} - -// CreateTemplate : create template in the database -func CreateTemplate(tmpl string) (string, error) { - filePath := "data/template/" + tmpl - // Read Content of template - data, err := readTemplateData(filePath) - if err != nil { - return "", err - } - req := template.WorkflowTemplate{Name: ("test_" + tmpl), Data: data} - res, err := client.TemplateClient.CreateTemplate(context.Background(), &req) - if err != nil { - return "", err - } - return res.Id, nil -} diff --git a/test/framework/utils.go b/test/framework/utils.go deleted file mode 100644 index 4390fdd7d..000000000 --- a/test/framework/utils.go +++ /dev/null @@ -1,29 +0,0 @@ -package framework - -import ( - "github.com/sirupsen/logrus" -) - -var logger = logrus.New() -var log *logrus.Entry - -// Log : This Log will be used in test cases. -var Log = logger - -// SetupWorkflow ... 
Set up workflow -func SetupWorkflow(tar string, tmpl string) (string, error) { - hardwareID := "c9d6faa4-08a2-4285-ae6c-f3401211bd56" - //Add template in template table - templateID, err := CreateTemplate(tmpl) - if err != nil { - return "", err - } - logger.Infoln("Template Created : ", templateID) - workflowID, err := CreateWorkflow(templateID, hardwareID) - if err != nil { - logger.Debugln("Workflow is not Created because : ", err) - return "", err - } - logger.Infoln("Workflow Created : ", workflowID) - return workflowID, nil -} diff --git a/test/framework/worker.go b/test/framework/worker.go deleted file mode 100644 index 96028207e..000000000 --- a/test/framework/worker.go +++ /dev/null @@ -1,187 +0,0 @@ -package framework - -import ( - "bufio" - "context" - "fmt" - "sync" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - dc "github.com/docker/docker/client" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/tinkerbell/tink/protos/workflow" -) - -var workerID = []string{"f9f56dff-098a-4c5f-a51c-19ad35de85d1", "f9f56dff-098a-4c5f-a51c-19ad35de85d2"} - -func initializeDockerClient() (*dc.Client, error) { - c, err := dc.NewClientWithOpts(dc.FromEnv, dc.WithAPIVersionNegotiation()) - if err != nil { - return nil, errors.Wrap(err, "DOCKER CLIENT") - } - return c, nil -} - -func createWorkerContainer(ctx context.Context, cli *dc.Client, workerID string, wfID string) (string, error) { - volume := map[string]struct{}{"/var/run/docker.sock": {}} - config := &container.Config{ - Image: "worker", - AttachStdout: true, - AttachStderr: true, - Tty: true, - Volumes: volume, - Env: []string{"TINKERBELL_GRPC_AUTHORITY=127.0.0.1:42113", "TINKERBELL_CERT_URL=http://127.0.0.1:42114/cert", "WORKER_ID=" + workerID, "DOCKER_REGISTRY=localhost:443", "DOCKER_API_VERSION=v1.40", "REGISTRY_USERNAME=username", "REGISTRY_PASSWORD=password"}, - } - hostConfig := &container.HostConfig{ - NetworkMode: "host", - Binds: []string{"/var/run/docker.sock:/var/run/docker.sock:rw", "/worker:/worker:rw"}, - } - resp, err := cli.ContainerCreate(ctx, config, hostConfig, nil, nil, workerID) - if err != nil { - return "", errors.Wrap(err, "DOCKER CREATE") - } - return resp.ID, nil -} - -func runContainer(ctx context.Context, cli *dc.Client, id string) error { - err := cli.ContainerStart(ctx, id, types.ContainerStartOptions{}) - if err != nil { - return errors.Wrap(err, "DOCKER START") - } - return nil -} - -func waitContainer(ctx context.Context, cli *dc.Client, id string, wg *sync.WaitGroup, failedWorkers chan<- string, statusChannel chan<- int64, stopLogs chan<- bool) { - // send API call to wait for the container completion - wait, errC := cli.ContainerWait(ctx, id, container.WaitConditionNotRunning) - select { - case status := <-wait: - statusChannel <- status.StatusCode - fmt.Println("Worker with id ", id, "finished successfully with status code ", status.StatusCode) - //stopLogs <- true - case err := <-errC: - log.Println("Worker with id ", id, "failed : ", err) - failedWorkers <- id - //stopLogs <- true - } - wg.Done() -} - -func removeContainer(ctx context.Context, cli *dc.Client, id string) error { - // create options for removing container - opts := types.ContainerRemoveOptions{ - Force: true, - RemoveLinks: false, - RemoveVolumes: true, - } - // send API call to remove the container - err := cli.ContainerRemove(ctx, id, opts) - if err != nil { - return err - } - log.Println("Worker Container removed : ", id) 
- return nil -} -func checkCurrentStatus(ctx context.Context, wfID string, workflowStatus chan workflow.State) { - for len(workflowStatus) == 0 { - GetCurrentStatus(ctx, wfID, workflowStatus) - } -} - -func captureLogs(ctx context.Context, cli *dc.Client, id string) { - reader, err := cli.ContainerLogs(context.Background(), id, types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Follow: true, - Timestamps: false, - }) - if err != nil { - panic(err) - } - defer reader.Close() - - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - fmt.Println(scanner.Text()) - } - fmt.Println("Logging Finished for container ", id) -} - -// StartWorkers starts the dummy workers -func StartWorkers(workers int64, workerStatus chan<- int64, wfID string) (workflow.State, error) { - log = logger.WithField("workflow_id", wfID) - var wg sync.WaitGroup - failedWorkers := make(chan string, workers) - workflowStatus := make(chan workflow.State, 1) - cli, err := initializeDockerClient() - if err != nil { - return workflow.State_STATE_FAILED, err - } - workerContainer := make([]string, workers) - var i int64 - for i = 0; i < workers; i++ { - ctx := context.Background() - cID, err := createWorkerContainer(ctx, cli, workerID[i], wfID) - log = logger.WithFields(logrus.Fields{"workflow_id": wfID, "worker_id": workerID[i]}) - if err != nil { - log.Errorln("Failed to create worker container : ", err) - } else { - workerContainer[i] = cID - log.Debugln("Worker container created with ID : ", cID) - // Run container - //startedAt := time.Now() - err = runContainer(ctx, cli, cID) - - if err != nil { - fmt.Println("Worker with id ", cID, " failed to start: ", err) - // TODO Should be remove the containers which started previously? - } else { - fmt.Println("Worker started with ID : ", cID) - wg.Add(1) - //capturing logs of action container in a go-routine - stopLogs := make(chan bool) - go captureLogs(ctx, cli, cID) - - go waitContainer(ctx, cli, cID, &wg, failedWorkers, workerStatus, stopLogs) - go checkCurrentStatus(ctx, wfID, workflowStatus) - } - } - } - - if err != nil { - return workflow.State_STATE_FAILED, err - } - - status := <-workflowStatus - log.Infoln("Status of Workflow : ", status) - wg.Wait() - ctx := context.Background() - for _, cID := range workerContainer { - err := removeContainer(ctx, cli, cID) - if err != nil { - log.Errorln("Failed to remove worker container with ID : ", cID) - } - } - - if len(failedWorkers) > 0 { - for i = 0; i < workers; i++ { - failedContainer, ok := <-failedWorkers - if ok { - log.Errorln("Worker Failed : ", failedContainer) - err = errors.New("Test Failed") - } - - if len(failedContainer) > 0 { - continue - } else { - break - } - } - } - if err != nil { - return status, err - } - return status, nil -} diff --git a/test/framework/workflow.go b/test/framework/workflow.go deleted file mode 100644 index 0cdfd0044..000000000 --- a/test/framework/workflow.go +++ /dev/null @@ -1,51 +0,0 @@ -package framework - -import ( - "context" - - "github.com/tinkerbell/tink/client" - "github.com/tinkerbell/tink/protos/workflow" -) - -// CreateWorkflow : create workflow -func CreateWorkflow(template string, hardware string) (string, error) { - req := workflow.CreateRequest{Template: template, Hardware: hardware} - res, err := client.WorkflowClient.CreateWorkflow(context.Background(), &req) - if err != nil { - return "", err - } - return res.Id, nil -} - -// GetCurrentStatus : get the current status of workflow from server -func GetCurrentStatus(ctx 
context.Context, wfID string, status chan workflow.State) { - req := workflow.GetRequest{Id: wfID} - wf, err := client.WorkflowClient.GetWorkflowContext(ctx, &req) - if err != nil { - log.Errorln("This is in Getting status ERROR: ", err) - status <- workflow.State_STATE_FAILED - } - if wf.CurrentActionState == workflow.State_STATE_FAILED { - status <- workflow.State_STATE_FAILED - } else if wf.CurrentActionState == workflow.State_STATE_TIMEOUT { - status <- workflow.State_STATE_TIMEOUT - } - currProgress := calWorkflowProgress(wf.CurrentActionIndex, wf.TotalNumberOfActions, wf.CurrentActionState) - if currProgress == 100 && wf.CurrentActionState == workflow.State_STATE_SUCCESS { - status <- workflow.State_STATE_SUCCESS - } -} - -func calWorkflowProgress(cur int64, total int64, state workflow.State) int64 { - if total == 0 || (cur == 0 && state != workflow.State_STATE_SUCCESS) { - return 0 - } - var taskCompleted int64 - if state == workflow.State_STATE_SUCCESS { - taskCompleted = cur + 1 - } else { - taskCompleted = cur - } - progress := (taskCompleted * 100) / total - return progress -} diff --git a/test/push_images.sh b/test/push_images.sh deleted file mode 100755 index 52deb3e2d..000000000 --- a/test/push_images.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -while ! docker login -u username -p password localhost; do - sleep 1 -done -docker push localhost/update-data -docker push localhost/overwrite-data -docker push localhost/bash diff --git a/test/test_wf_timeout.go b/test/test_wf_timeout.go deleted file mode 100644 index 0b5283f22..000000000 --- a/test/test_wf_timeout.go +++ /dev/null @@ -1,49 +0,0 @@ -package e2e - -//import ( -// "context" -// "testing" -// -// "github.com/tinkerbell/tink/client" -// "github.com/tinkerbell/tink/protos/workflow" -// "github.com/tinkerbell/tink/test/framework" -// "github.com/stretchr/testify/assert" -//) -// -//// TestWfTimeout : Timeout Test -//func TestWfTimeout(t *testing.T) { -// // Start test only if the test case exist in the table -// if test, ok := testCases["testWfTimeout"]; ok { -// wfID, err := framework.SetupWorkflow(test.hardware, test.template) -// -// if err != nil { -// t.Error(err) -// } -// assert.NoError(t, err, "Create Workflow") -// -// // Start the Worker -// workerStatus := make(chan int64, test.workers) -// wfStatus, err := framework.StartWorkers(test.workers, workerStatus, wfID) -// if err != nil { -// log.Errorf("Test Failed\n") -// t.Error(err) -// } -// assert.Equal(t, test.expected, wfStatus) -// assert.NoError(t, err, "Workers Failed") -// -// for i := int64(0); i < test.workers; i++ { -// if len(workerStatus) > 0 { -// // Check for worker exit status -// status := <-workerStatus -// expected := 0 -// assert.Equal(t, int64(expected), status) -// -// //checking for ephemeral data validation -// resp, err := client.WorkflowClient.GetWorkflowData(context.Background(), &workflow.GetWorkflowDataRequest{WorkflowID: wfID, Version: 0}) -// if err != nil { -// assert.Equal(t, test.ephData, string(resp.GetData())) -// } -// } -// } -// } -//} diff --git a/test/test_wf_with_multi_workers.go b/test/test_wf_with_multi_workers.go deleted file mode 100644 index ba43d00db..000000000 --- a/test/test_wf_with_multi_workers.go +++ /dev/null @@ -1,52 +0,0 @@ -package e2e - -//import ( -// "context" -// "testing" -// -// "github.com/tinkerbell/tink/client" -// "github.com/tinkerbell/tink/protos/workflow" -// "github.com/tinkerbell/tink/test/framework" -// 
"github.com/stretchr/testify/assert" -//) -// -//// TestWfWithMultiWorkers : Two Worker Test -//func TestWfWithMultiWorkers(t *testing.T) { -// // Start test only if the test case exist in the table -// if test, ok := testCases["testWfWithMultiWorkers"]; ok { -// wfID, err := framework.SetupWorkflow(test.hardware, test.template) -// -// if err != nil { -// t.Error(err) -// } -// assert.NoError(t, err, "Create Workflow") -// -// // Start the Worker -// workerStatus := make(chan int64, test.workers) -// wfStatus, err := framework.StartWorkers(test.workers, workerStatus, wfID) -// if err != nil { -// log.Errorf("Test Failed\n") -// t.Error(err) -// } -// assert.Equal(t, test.expected, wfStatus) -// assert.NoError(t, err, "Workers Failed") -// -// for i := int64(0); i < test.workers; i++ { -// if len(workerStatus) > 0 { -// // Check for worker exit status -// status := <-workerStatus -// expected := 0 -// if test.expected != workflow.ActionState_ACTION_STATE_SUCCESS { -// expected = 1 -// } -// assert.Equal(t, int64(expected), status) -// -// //checking for ephemeral data validation -// resp, err := client.WorkflowClient.GetWorkflowData(context.Background(), &workflow.GetWorkflowDataRequest{WorkflowID: wfID, Version: 0}) -// if err != nil { -// assert.Equal(t, test.ephData, string(resp.GetData())) -// } -// } -// } -// } -//} diff --git a/test/test_wf_with_worker.go b/test/test_wf_with_worker.go deleted file mode 100644 index 8ebe8f6e9..000000000 --- a/test/test_wf_with_worker.go +++ /dev/null @@ -1,54 +0,0 @@ -package e2e - -//import ( -// "context" -// "testing" -// -// "github.com/tinkerbell/tink/client" -// "github.com/tinkerbell/tink/protos/workflow" -// "github.com/tinkerbell/tink/test/framework" -// "github.com/stretchr/testify/assert" -//) -// -//// TestWfWithWorker : One Worker Test -//func TestWfWithWorker(t *testing.T) { -// -// // Start test only if the test case exist in the table -// if test, ok := testCases["testWfWithWorker"]; ok { -// wfID, err := framework.SetupWorkflow(test.hardware, test.template) -// -// if err != nil { -// t.Error(err) -// } -// if !assert.NoError(t, err, "Create Workflow") { -// t.Fatal(err) -// } -// -// // Start the Worker -// workerStatus := make(chan int64, test.workers) -// wfStatus, err := framework.StartWorkers(test.workers, workerStatus, wfID) -// if err != nil { -// log.Errorf("Test Failed\n") -// t.Error(err) -// } -// assert.Equal(t, test.expected, wfStatus) -// assert.NoError(t, err, "Workers Failed") -// -// for i := int64(0); i < test.workers; i++ { -// if len(workerStatus) > 0 { -// //Check for worker exit status -// status := <-workerStatus -// expected := 0 -// if test.expected != workflow.ActionState_ACTION_STATE_SUCCESS { -// expected = 1 -// } -// assert.Equal(t, int64(expected), status) -// //checking for ephemeral data validation -// resp, err := client.WorkflowClient.GetWorkflowData(context.Background(), &workflow.GetWorkflowDataRequest{WorkflowID: wfID, Version: 0}) -// if err != nil { -// assert.Equal(t, test.ephData, string(resp.GetData())) -// } -// } -// } -// } -//}