Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix service port naming convention #1368

Merged
merged 22 commits into from
Jan 27, 2021
Merged
Show file tree
Hide file tree
Changes from 19 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .ci/run-e2e-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,10 @@ then
export SPECIFY_OTEL_IMAGES=true
export SPECIFY_OTEL_CONFIG=true
make e2e-tests-smoke
elif [ "${TEST_GROUP}" = "istio" ]
then
echo "Running Smoke Tests with istio"
make e2e-tests-istio
else
echo "Unknown TEST_GROUP [${TEST_GROUP}]"; exit 1
fi
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/e2e-kubernetes.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ jobs:
runs-on: ubuntu-16.04
strategy:
matrix:
TEST_GROUP: [smoke, es, cassandra, streaming, examples1, examples2, generate, es-otel, streaming-otel, smoke-otel, upgrade]
TEST_GROUP: [smoke, es, cassandra, streaming, examples1, examples2, generate, es-otel, streaming-otel, smoke-otel, upgrade, istio]
steps:
- uses: actions/setup-go@v1
with:
Expand Down
32 changes: 27 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,13 @@ ES_OPERATOR_NAMESPACE ?= openshift-logging
ES_OPERATOR_BRANCH ?= release-4.4
ES_OPERATOR_IMAGE ?= quay.io/openshift/origin-elasticsearch-operator:4.4
SDK_VERSION=v0.18.2
ISTIO_VERSION ?= 1.8.2
ISTIOCTL="./deploy/test/istio/bin/istioctl"
GOPATH ?= "$(HOME)/go"
GOROOT ?= "$(shell go env GOROOT)"

SED ?= "sed"

PROMETHEUS_OPERATOR_TAG ?= v0.39.0
PROMETHEUS_BUNDLE ?= https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/${PROMETHEUS_OPERATOR_TAG}/bundle.yaml

Expand Down Expand Up @@ -115,7 +119,7 @@ prepare-e2e-tests: build docker push
@cat deploy/role_binding.yaml >> deploy/test/namespace-manifests.yaml
@echo "---" >> deploy/test/namespace-manifests.yaml

@sed "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" test/operator.yaml >> deploy/test/namespace-manifests.yaml
@${SED} "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" test/operator.yaml >> deploy/test/namespace-manifests.yaml

@cp deploy/crds/jaegertracing.io_jaegers_crd.yaml deploy/test/global-manifests.yaml
@echo "---" >> deploy/test/global-manifests.yaml
Expand Down Expand Up @@ -194,6 +198,11 @@ e2e-tests-upgrade: prepare-e2e-tests
@echo Running Upgrade end-to-end tests...
UPGRADE_TEST_VERSION=$(shell .ci/get_test_upgrade_version.sh ${JAEGER_VERSION}) go test -tags=upgrade ./test/e2e/... $(TEST_OPTIONS)

# Run the Istio end-to-end test suite (test/e2e tagged `istio`).
# Depends on a prepared e2e environment and an installed Istio control
# plane (see the `istio` target below).
.PHONY: e2e-tests-istio
e2e-tests-istio: prepare-e2e-tests istio
	@echo Running Istio end-to-end tests...
	@STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=istio ./test/e2e/... $(TEST_OPTIONS)

.PHONY: run
run: crd
@rm -rf /tmp/_cert*
Expand Down Expand Up @@ -250,6 +259,19 @@ else
@kubectl create -f ./test/elasticsearch.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
endif

# Install Istio with the minimal profile into the current cluster.
# istioctl (version $(ISTIO_VERSION)) is downloaded once into
# deploy/test/istio and reused on subsequent runs.
# NOTE(review): TARGET_ARCH is pinned to x86_64 — a non-amd64 host would
# download the wrong binary; confirm this is intended for CI only.
.PHONY: istio
istio:
	@echo Install istio with minimal profile
	@mkdir -p deploy/test
	@[ -f "${ISTIOCTL}" ] || (curl -L https://istio.io/downloadIstio | ISTIO_VERSION=${ISTIO_VERSION} TARGET_ARCH=x86_64 sh - && mv ./istio-${ISTIO_VERSION} ./deploy/test/istio)
	@${ISTIOCTL} install --set profile=minimal -y

# Remove Istio from the cluster and delete the local istioctl download.
# Every step tolerates a missing installation so this target is safe to
# run from `clean` even when `istio` never ran.
# NOTE(review): manifests are generated with profile=demo while the
# `istio` target installs profile=minimal; demo is a superset so cleanup
# still covers everything, but confirm the asymmetry is intentional.
.PHONY: undeploy-istio
undeploy-istio:
	@[ -f "${ISTIOCTL}" ] && (${ISTIOCTL} manifest generate --set profile=demo | kubectl delete --ignore-not-found=true -f -) || true
	@kubectl delete namespace istio-system --ignore-not-found=true || true
	@rm -rf deploy/test/istio

.PHONY: cassandra
cassandra: storage
@kubectl create -f ./test/cassandra.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true
Expand All @@ -270,7 +292,7 @@ else
@kubectl create clusterrolebinding strimzi-cluster-operator-entity-operator-delegation --clusterrole=strimzi-entity-operator --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
@kubectl create clusterrolebinding strimzi-cluster-operator-topic-operator-delegation --clusterrole=strimzi-topic-operator --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true
@curl --fail --location $(KAFKA_YAML) --output deploy/test/kafka-operator.yaml --create-dirs
@sed 's/namespace: .*/namespace: $(KAFKA_NAMESPACE)/' deploy/test/kafka-operator.yaml | kubectl -n $(KAFKA_NAMESPACE) apply -f - 2>&1 | grep -v "already exists" || true
@${SED} 's/namespace: .*/namespace: $(KAFKA_NAMESPACE)/' deploy/test/kafka-operator.yaml | kubectl -n $(KAFKA_NAMESPACE) apply -f - 2>&1 | grep -v "already exists" || true
@kubectl set env deployment strimzi-cluster-operator -n ${KAFKA_NAMESPACE} STRIMZI_NAMESPACE="*"
endif

Expand All @@ -294,7 +316,7 @@ else
@echo Creating namespace $(KAFKA_NAMESPACE)
@kubectl create namespace $(KAFKA_NAMESPACE) 2>&1 | grep -v "already exists" || true
@curl --fail --location $(KAFKA_EXAMPLE) --output deploy/test/kafka-example.yaml --create-dirs
@sed -i 's/size: 100Gi/size: 10Gi/g' deploy/test/kafka-example.yaml
@${SED} -i 's/size: 100Gi/size: 10Gi/g' deploy/test/kafka-example.yaml
@kubectl -n $(KAFKA_NAMESPACE) apply --dry-run=true -f deploy/test/kafka-example.yaml
@kubectl -n $(KAFKA_NAMESPACE) apply -f deploy/test/kafka-example.yaml 2>&1 | grep -v "already exists" || true
endif
Expand All @@ -321,7 +343,7 @@ else
endif

.PHONY: clean
clean: undeploy-kafka undeploy-es-operator undeploy-prometheus-operator
clean: undeploy-kafka undeploy-es-operator undeploy-prometheus-operator undeploy-istio
@rm -f deploy/test/*.yaml
@if [ -d deploy/test ]; then rmdir deploy/test ; fi
@kubectl delete -f ./test/cassandra.yml --ignore-not-found=true -n $(STORAGE_NAMESPACE) || true
Expand Down Expand Up @@ -383,7 +405,7 @@ deploy: ingress crd
@kubectl apply -f deploy/service_account.yaml
@kubectl apply -f deploy/cluster_role.yaml
@kubectl apply -f deploy/cluster_role_binding.yaml
@sed "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" deploy/operator.yaml | kubectl apply -f -
@${SED} "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" deploy/operator.yaml | kubectl apply -f -

.PHONY: operatorhub
operatorhub: check-operatorhub-pr-template
Expand Down
12 changes: 6 additions & 6 deletions pkg/service/collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,31 +97,31 @@ func GetNameForHeadlessCollectorService(jaeger *v1.Jaeger) string {
// GetPortNameForGRPC returns the name to use for the collector's gRPC
// service port, following the Istio-compatible "<protocol>-<suffix>"
// convention introduced by this change: "grpc-https" when TLS is enabled
// for the gRPC endpoint, "grpc-http" otherwise.
//
// NOTE(review): the scraped diff contained both the old ("https-grpc")
// and new ("grpc-https") return values; this is the resolved post-merge
// version using the new naming convention.
func GetPortNameForGRPC(jaeger *v1.Jaeger) string {
	if viper.GetString("platform") == v1.FlagPlatformOpenShift {
		// we always have TLS certs when running on OpenShift, so, TLS is always enabled
		return "grpc-https"
	}

	// if we don't have a jaeger provided, it's certainly not TLS...
	if nil == jaeger {
		return "grpc-http"
	}

	// perhaps the user has provisioned the certs and configured the CR manually?
	// for that, we check whether the CLI option `collector.grpc.tls.enabled` was set for the collector
	if val, ok := jaeger.Spec.Collector.Options.Map()["collector.grpc.tls.enabled"]; ok {
		enabled, err := strconv.ParseBool(val)
		if err != nil {
			return "grpc-http" // not "true", defaults to false
		}

		if enabled {
			return "grpc-https" // explicit true
		}

		return "grpc-http" // explicit false
	}

	// doesn't look like we have TLS enabled
	return "grpc-http"
}

func getTypeForCollectorService(jaeger *v1.Jaeger) corev1.ServiceType {
Expand Down
12 changes: 6 additions & 6 deletions pkg/service/collector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,13 +66,13 @@ func TestCollectorGRPCPortName(t *testing.T) {
{
"nil",
nil,
"http-grpc",
"grpc-http",
false, // in openshift?
},
{
"no-tls",
&v1.Jaeger{},
"http-grpc",
"grpc-http",
false, // in openshift?
},
{
Expand All @@ -84,7 +84,7 @@ func TestCollectorGRPCPortName(t *testing.T) {
},
},
},
"http-grpc",
"grpc-http",
false, // in openshift?
},
{
Expand All @@ -96,7 +96,7 @@ func TestCollectorGRPCPortName(t *testing.T) {
},
},
},
"http-grpc",
"grpc-http",
false, // in openshift?
},
{
Expand All @@ -108,13 +108,13 @@ func TestCollectorGRPCPortName(t *testing.T) {
},
},
},
"https-grpc",
"grpc-https",
false, // in openshift?
},
{
"in-openshift",
&v1.Jaeger{},
"https-grpc",
"grpc-https",
true, // in openshift?
},
} {
Expand Down
24 changes: 2 additions & 22 deletions test/e2e/examples1_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,7 @@
package e2e

import (
"context"
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"os"
Expand All @@ -16,13 +14,10 @@ import (
"time"

framework "github.com/operator-framework/operator-sdk/pkg/test"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
)

type ExamplesTestSuite struct {
Expand Down Expand Up @@ -126,23 +121,8 @@ func (suite *ExamplesTestSuite) TestBusinessApp() {
handler := &corev1.Handler{HTTPGet: livelinessHandler}
livelinessProbe := &corev1.Probe{Handler: *handler, InitialDelaySeconds: 1, FailureThreshold: 3, PeriodSeconds: 10, SuccessThreshold: 1, TimeoutSeconds: 1}

err = wait.Poll(retryInterval, timeout, func() (done bool, err error) {
vertxDeployment, err := fw.KubeClient.AppsV1().Deployments(namespace).Get(context.Background(), vertxDeploymentName, metav1.GetOptions{})
require.NoError(t, err)
containers := vertxDeployment.Spec.Template.Spec.Containers
for index, container := range containers {
if container.Name == vertxDeploymentName {
vertxDeployment.Spec.Template.Spec.Containers[index].LivenessProbe = livelinessProbe
updatedVertxDeployment, err := fw.KubeClient.AppsV1().Deployments(namespace).Update(context.Background(), vertxDeployment, metav1.UpdateOptions{})
if err != nil {
log.Warnf("Error %v updating vertx app, retrying", err)
return false, nil
}
log.Infof("Updated deployment %v", updatedVertxDeployment.Name)
return true, nil
}
}
return false, errors.New("Vertx deployment not found")
err = waitForDeploymentAndUpdate(vertxDeploymentName, vertxDeploymentName, func(container *corev1.Container) {
container.LivenessProbe = livelinessProbe
})
require.NoError(t, err)

Expand Down
145 changes: 145 additions & 0 deletions test/e2e/istio_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,145 @@
// +build istio

package e2e

import (
"context"
jpkrohling marked this conversation as resolved.
Show resolved Hide resolved
"encoding/json"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"testing"
"time"

framework "github.com/operator-framework/operator-sdk/pkg/test"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)

// IstioTestSuite exercises the operator in a namespace that has Istio
// automatic sidecar injection enabled (labeled in SetupSuite).
type IstioTestSuite struct {
suite.Suite
}

// LIFECYCLE - Suite
// SetupSuite prepares the shared test state: it creates the test
// namespace via prepare(), labels the namespace with
// istio-injection=enabled so Istio injects an Envoy sidecar into every
// pod deployed there, and registers the scheme used by the smoke tests.
// It populates the package-level t, ctx, fw and namespace globals.
func (suite *IstioTestSuite) SetupSuite() {
t = suite.T()
var err error
ctx, err = prepare(t)
if err != nil {
if ctx != nil {
ctx.Cleanup()
}
require.FailNow(t, "Failed in prepare")
}
fw = framework.Global
namespace = ctx.GetID()
require.NotNil(t, namespace, "GetID failed")

// label namespace
ns, err := framework.Global.KubeClient.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{})
require.NoError(t, err, "failed to get the namespaces details: %v", err)

nsLabels := ns.GetLabels()
if nsLabels == nil {
nsLabels = make(map[string]string)
}
// enable automatic Envoy sidecar injection for this namespace
nsLabels["istio-injection"] = "enabled"
ns.SetLabels(nsLabels)

ns, err = framework.Global.KubeClient.CoreV1().Namespaces().Update(context.Background(), ns, metav1.UpdateOptions{})
require.NoError(t, err, "failed to update labels of the namespace %s", namespace)

addToFrameworkSchemeForSmokeTests(t)
}

// TearDownSuite delegates suite cleanup to the shared teardown helper.
func (suite *IstioTestSuite) TearDownSuite() {
handleSuiteTearDown()
}

// LIFECYCLE - Test

// SetupTest refreshes the package-level *testing.T before each test.
func (suite *IstioTestSuite) SetupTest() {
t = suite.T()
}

// AfterTest invokes the shared failure handler after each test run.
func (suite *IstioTestSuite) AfterTest(suiteName, testName string) {
handleTestFailure()
}

// TestIstioSuite is the standard `go test` entry point for the suite.
func TestIstioSuite(t *testing.T) {
suite.Run(t, new(IstioTestSuite))
}

// TestEnvoySidecar deploys a Jaeger instance and the vert.x example
// business application into the istio-injection-enabled namespace, then
// verifies (a) that Istio injected its istio-proxy (Envoy) sidecar into
// the app pod and (b) that traces from the app reach the Jaeger query
// service.
func (suite *IstioTestSuite) TestEnvoySidecar() {
// First deploy a Jaeger instance
jaegerInstanceName := "simplest"
jaegerInstance := createJaegerInstanceFromFile(jaegerInstanceName, "../../examples/simplest.yaml")
defer undeployJaegerInstance(jaegerInstance)
err := WaitForDeployment(t, fw.KubeClient, namespace, jaegerInstanceName, 1, retryInterval, timeout+(1*time.Minute))
require.NoError(t, err)

// Now deploy examples/business-application-injected-sidecar.yaml
businessAppCR := getBusinessAppCR(err)
defer os.Remove(businessAppCR.Name())
cmd := exec.Command("kubectl", "create", "--namespace", namespace, "--filename", businessAppCR.Name())
output, err := cmd.CombinedOutput()
// tolerate reruns: an AlreadyExists from kubectl is not a failure
if err != nil && !strings.Contains(string(output), "AlreadyExists") {
require.NoError(t, err, "Failed creating Jaeger instance with: [%s]\n", string(output))
}
const vertxDeploymentName = "myapp"
err = WaitForDeployment(t, fw.KubeClient, namespace, vertxDeploymentName, 1, retryInterval, timeout)
require.NoError(t, err, "Failed waiting for myapp deployment")

// Add a liveliness probe to create some traces
vertxPort := intstr.IntOrString{IntVal: 8080}
livelinessHandler := &corev1.HTTPGetAction{Path: "/", Port: vertxPort, Scheme: corev1.URISchemeHTTP}
handler := &corev1.Handler{HTTPGet: livelinessHandler}
livelinessProbe := &corev1.Probe{Handler: *handler, InitialDelaySeconds: 1, FailureThreshold: 3, PeriodSeconds: 10, SuccessThreshold: 1, TimeoutSeconds: 1}

err = waitForDeploymentAndUpdate(vertxDeploymentName, vertxDeploymentName, func(container *corev1.Container) {
container.LivenessProbe = livelinessProbe
})
require.NoError(t, err)

// Verify Istio injected its Envoy sidecar into the app pod
exists := testContainerInPod(namespace, vertxDeploymentName, "istio-proxy", nil)
require.True(t, exists)

// Confirm that we've created some traces
ports := []string{"0:16686"}
portForward, closeChan := CreatePortForward(namespace, jaegerInstanceName, "all-in-one", ports, fw.KubeConfig)
defer portForward.Close()
defer close(closeChan)
forwardedPorts, err := portForward.GetPorts()
require.NoError(t, err)
queryPort := strconv.Itoa(int(forwardedPorts[0].Local))

// poll the Jaeger query API until traces for the "order" service appear
url := "http://localhost:" + queryPort + "/api/traces?service=order"
err = WaitAndPollForHTTPResponse(url, func(response *http.Response) (bool, error) {
body, err := ioutil.ReadAll(response.Body)
require.NoError(t, err)

resp := &resp{}
err = json.Unmarshal(body, &resp)
require.NoError(t, err)

return len(resp.Data) > 0 && strings.Contains(string(body), "traceID"), nil
})
require.NoError(t, err, "SmokeTest failed")
}

func getBusinessAppCR(err error) *os.File {
jpkrohling marked this conversation as resolved.
Show resolved Hide resolved
content, err := ioutil.ReadFile("../../examples/business-application-injected-sidecar.yaml")
require.NoError(t, err)
newContent := strings.Replace(string(content), "image: jaegertracing/vertx-create-span:operator-e2e-tests", "image: "+vertxExampleImage, 1)
file, err := ioutil.TempFile("", "vertx-example")
require.NoError(t, err)
err = ioutil.WriteFile(file.Name(), []byte(newContent), 0666)
require.NoError(t, err)
return file
}
Loading