Merge pull request #311 from istio-ecosystem/upstream-refresh-0810
Upstream refresh 0810
Showing 53 changed files with 3,049 additions and 680 deletions.
@@ -0,0 +1,186 @@
package clusters

import (
    "errors"
    "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
    "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
    "github.com/istio-ecosystem/admiral/admiral/pkg/registry"
    "github.com/istio-ecosystem/admiral/admiral/pkg/util"
    "github.com/sirupsen/logrus"
    networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
    "sort"
    "strconv"
    "strings"
)

// IstioSEBuilder is an interface to construct Service Entry objects
// from IdentityConfig objects. It can construct multiple Service Entries
// from an IdentityConfig or construct just one given an IdentityConfigEnvironment.
type IstioSEBuilder interface {
    BuildServiceEntriesFromIdentityConfig(ctxLogger *logrus.Entry, event admiral.EventType, identityConfig registry.IdentityConfig) ([]*networkingV1Alpha3.ServiceEntry, error)
}

type ServiceEntryBuilder struct {
    RemoteRegistry *RemoteRegistry
    ClientCluster  string
}

// BuildServiceEntriesFromIdentityConfig builds service entries to write to the client cluster
// by looping through the IdentityConfig clusters and environments to get spec information. It
// builds one SE per environment per cluster the identity is deployed in.
func (b *ServiceEntryBuilder) BuildServiceEntriesFromIdentityConfig(ctxLogger *logrus.Entry, identityConfig registry.IdentityConfig) ([]*networkingV1Alpha3.ServiceEntry, error) {
    var (
        identity       = identityConfig.IdentityName
        seMap          = map[string]*networkingV1Alpha3.ServiceEntry{}
        serviceEntries = []*networkingV1Alpha3.ServiceEntry{}
        err            error
    )
    ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", identity, common.GetSyncNamespace(), b.ClientCluster, "Beginning to build the SE spec")
    ingressEndpoints, err := getIngressEndpoints(identityConfig.Clusters)
    if err != nil {
        return serviceEntries, err
    }
    _, isServerOnClientCluster := ingressEndpoints[b.ClientCluster]
    dependentNamespaces, err := getExportTo(ctxLogger, b.RemoteRegistry.RegistryClient, b.ClientCluster, isServerOnClientCluster, identityConfig.ClientAssets)
    if err != nil {
        return serviceEntries, err
    }
    for _, identityConfigCluster := range identityConfig.Clusters {
        serverCluster := identityConfigCluster.Name
        for _, identityConfigEnvironment := range identityConfigCluster.Environment {
            env := identityConfigEnvironment.Name
            var tmpSe *networkingV1Alpha3.ServiceEntry
            ep, err := getServiceEntryEndpoint(ctxLogger, b.ClientCluster, serverCluster, ingressEndpoints, identityConfigEnvironment)
            if err != nil {
                return serviceEntries, err
            }
            ports, err := getServiceEntryPorts(identityConfigEnvironment)
            if err != nil {
                return serviceEntries, err
            }
            if se, ok := seMap[env]; !ok {
                tmpSe = &networkingV1Alpha3.ServiceEntry{
                    Hosts:           []string{common.GetCnameVal([]string{env, strings.ToLower(identity), common.GetHostnameSuffix()})},
                    Ports:           ports,
                    Location:        networkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
                    Resolution:      networkingV1Alpha3.ServiceEntry_DNS,
                    SubjectAltNames: []string{common.SpiffePrefix + common.GetSANPrefix() + common.Slash + identity},
                    Endpoints:       []*networkingV1Alpha3.WorkloadEntry{ep},
                    ExportTo:        dependentNamespaces,
                }
            } else {
                tmpSe = se
                tmpSe.Endpoints = append(tmpSe.Endpoints, ep)
            }
            serviceEntries = append(serviceEntries, tmpSe)
        }
    }
    return serviceEntries, err
}

// getIngressEndpoints constructs the endpoint of the ingress gateway/remote endpoint for an identity
// by reading the information directly from the IdentityConfigCluster.
func getIngressEndpoints(clusters []registry.IdentityConfigCluster) (map[string]*networkingV1Alpha3.WorkloadEntry, error) {
    ingressEndpoints := map[string]*networkingV1Alpha3.WorkloadEntry{}
    var err error
    for _, cluster := range clusters {
        portNumber, err := strconv.ParseInt(cluster.IngressPort, 10, 64)
        if err != nil {
            return ingressEndpoints, err
        }
        ingressEndpoint := &networkingV1Alpha3.WorkloadEntry{
            Address:  cluster.IngressEndpoint,
            Locality: cluster.Locality,
            Ports:    map[string]uint32{cluster.IngressPortName: uint32(portNumber)},
            Labels:   map[string]string{"security.istio.io/tlsMode": "istio"},
        }
        ingressEndpoints[cluster.Name] = ingressEndpoint
    }
    return ingressEndpoints, err
}

// getServiceEntryPorts constructs the ServicePorts of the service entry that should be built
// for the given identityConfigEnvironment.
func getServiceEntryPorts(identityConfigEnvironment registry.IdentityConfigEnvironment) ([]*networkingV1Alpha3.ServicePort, error) {
    port := &networkingV1Alpha3.ServicePort{Number: uint32(common.DefaultServiceEntryPort), Name: util.Http, Protocol: util.Http}
    var err error
    if len(identityConfigEnvironment.Ports) == 0 {
        err = errors.New("identityConfigEnvironment had no ports for: " + identityConfigEnvironment.Name)
    }
    for _, servicePort := range identityConfigEnvironment.Ports {
        // TODO: 8090 is supposed to come from common.SidecarEnabledPorts (includeInboundPorts), which is checked
        // in the rollout, but that information is not available here, so assume it is 8090.
        if servicePort.TargetPort.IntValue() == 8090 {
            protocol := util.GetPortProtocol(servicePort.Name)
            port.Name = protocol
            port.Protocol = protocol
        }
    }
    ports := []*networkingV1Alpha3.ServicePort{port}
    return ports, err
}

// getServiceEntryEndpoint constructs the remote or local endpoints of the service entry that
// should be built for the given identityConfigEnvironment.
func getServiceEntryEndpoint(ctxLogger *logrus.Entry, clientCluster string, serverCluster string, ingressEndpoints map[string]*networkingV1Alpha3.WorkloadEntry, identityConfigEnvironment registry.IdentityConfigEnvironment) (*networkingV1Alpha3.WorkloadEntry, error) {
    // TODO: Verify Local and Remote Endpoints are constructed correctly
    var err error
    endpoint := ingressEndpoints[serverCluster]
    tmpEp := endpoint.DeepCopy()
    tmpEp.Labels["type"] = identityConfigEnvironment.Type
    if clientCluster == serverCluster {
        // Use the local service address when the identity is deployed on the same cluster as its client;
        // otherwise the endpoint stays the cluster's remote (ingress) endpoint.
        tmpEp.Address = identityConfigEnvironment.ServiceName + common.Sep + identityConfigEnvironment.Namespace + common.GetLocalDomainSuffix()
        for _, servicePort := range identityConfigEnvironment.Ports {
            // There should only be one mesh port here (http-service-mesh), but we preserve the ability to have multiple ports.
            protocol := util.GetPortProtocol(servicePort.Name)
            if _, ok := tmpEp.Ports[protocol]; ok {
                tmpEp.Ports[protocol] = uint32(servicePort.Port)
                ctxLogger.Infof(common.CtxLogFormat, "LocalMeshPort", servicePort.Port, "", serverCluster, "Protocol: "+protocol)
            } else {
                err = errors.New("failed to get Port for protocol: " + protocol)
            }
        }
    }
    return tmpEp, err
}

// getExportTo constructs a sorted list of unique namespaces for a given cluster, client assets,
// and cname, where each namespace is where a client asset of the cname is deployed on the cluster. If the cname
// is also deployed on the cluster then the istio-system namespace is also in the list.
func getExportTo(ctxLogger *logrus.Entry, registryClient registry.IdentityConfiguration, clientCluster string, isServerOnClientCluster bool, clientAssets []map[string]string) ([]string, error) {
    clientNamespaces := []string{}
    var err error
    var clientIdentityConfig registry.IdentityConfig
    for _, clientAsset := range clientAssets {
        // For each client asset of the cname, we fetch its IdentityConfig.
        clientIdentityConfig, err = registryClient.GetIdentityConfigByIdentityName(clientAsset["name"], ctxLogger)
        if err != nil {
            ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", clientAsset["name"], common.GetSyncNamespace(), "", "could not fetch IdentityConfig: "+err.Error())
            continue
        }
        for _, clientIdentityConfigCluster := range clientIdentityConfig.Clusters {
            // For each cluster the client asset is deployed on, we check if that cluster is the client cluster we are writing to.
            if clientCluster == clientIdentityConfigCluster.Name {
                for _, clientIdentityConfigEnvironment := range clientIdentityConfigCluster.Environment {
                    // For each environment of the client asset on the client cluster, we add the namespace to our list.
                    // Do we need to check if ENV matches here for exportTo? Currently we don't, but we could.
                    clientNamespaces = append(clientNamespaces, clientIdentityConfigEnvironment.Namespace)
                }
            }
        }
    }
    if isServerOnClientCluster {
        clientNamespaces = append(clientNamespaces, common.NamespaceIstioSystem)
    }
    if len(clientNamespaces) > common.GetExportToMaxNamespaces() {
        clientNamespaces = []string{"*"}
    }
    sort.Strings(clientNamespaces)
    var dedupClientNamespaces []string
    for i := 0; i < len(clientNamespaces); i++ {
        if i == 0 || clientNamespaces[i] != clientNamespaces[i-1] {
            dedupClientNamespaces = append(dedupClientNamespaces, clientNamespaces[i])
        }
    }
    // Return the deduplicated list; clientNamespaces may still contain duplicates.
    return dedupClientNamespaces, err
}
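
For reviewers, a minimal sketch of how the new builder could be driven (same package and imports as the file above). The asset name and client cluster are hypothetical, and the registry lookup mirrors the GetIdentityConfigByIdentityName call that getExportTo already makes; this is illustrative only, not part of this diff.

// sketchBuildServiceEntries is a hypothetical driver, not part of this change.
func sketchBuildServiceEntries(remoteRegistry *RemoteRegistry, ctxLogger *logrus.Entry) ([]*networkingV1Alpha3.ServiceEntry, error) {
    // Fetch the IdentityConfig for a (made-up) asset from the registry client.
    identityConfig, err := remoteRegistry.RegistryClient.GetIdentityConfigByIdentityName("sample-asset", ctxLogger)
    if err != nil {
        return nil, err
    }
    builder := &ServiceEntryBuilder{
        RemoteRegistry: remoteRegistry,
        ClientCluster:  "client-cluster", // hypothetical cluster name
    }
    // Returns one ServiceEntry per environment per cluster the identity is deployed in,
    // with ExportTo limited to the namespaces of its client assets.
    return builder.BuildServiceEntriesFromIdentityConfig(ctxLogger, identityConfig)
}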