diff --git a/Cargo.lock b/Cargo.lock index 87840eb7d6cac..41a25a03dbc71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1325,8 +1325,7 @@ dependencies = [ [[package]] name = "linkerd2-proxy-api" version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c72fb98d969e1e94e95d52a6fcdf4693764702c369e577934256e72fb5bc61" +source = "git+https://github.com/linkerd/linkerd2-proxy-api?rev=c5648ae2a1e405cc6b8aca20522356ebdf20f1ea#c5648ae2a1e405cc6b8aca20522356ebdf20f1ea" dependencies = [ "http", "ipnet", diff --git a/Cargo.toml b/Cargo.toml index 2bd3689b03f4d..9ba219c0c9972 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,3 +12,7 @@ members = [ [profile.release] lto = "thin" + +[patch.crates-io] +# TODO(Zahari): switch to the released version once TLS protocol support is out. +linkerd2-proxy-api = { git = 'https://github.com/linkerd/linkerd2-proxy-api', rev = 'c5648ae2a1e405cc6b8aca20522356ebdf20f1ea' } \ No newline at end of file diff --git a/deny.toml b/deny.toml index e5014532f6b9b..680f9dabe2880 100644 --- a/deny.toml +++ b/deny.toml @@ -77,6 +77,7 @@ skip-tree = [ unknown-registry = "deny" unknown-git = "deny" allow-registry = ["https://github.com/rust-lang/crates.io-index"] +allow-git = ["https://github.com/linkerd/linkerd2-proxy-api"] [sources.allow-org] github = [] diff --git a/policy-controller/core/src/outbound.rs b/policy-controller/core/src/outbound.rs index c429b0e218a99..dcde37fbf3ee4 100644 --- a/policy-controller/core/src/outbound.rs +++ b/policy-controller/core/src/outbound.rs @@ -8,42 +8,44 @@ use chrono::{offset::Utc, DateTime}; use futures::prelude::*; use std::{net::IpAddr, num::NonZeroU16, pin::Pin, time}; +mod policy; +mod target; + +type FallbackPolicy = (); + +pub use self::{ + policy::{OutboundPolicy, ParentInfo}, + target::{Kind, OutboundDiscoverTarget, ResourceTarget}, +}; + +pub trait Route { + fn creation_timestamp(&self) -> Option>; +} + /// Models outbound policy discovery. 
#[async_trait::async_trait] -pub trait DiscoverOutboundPolicy { - async fn get_outbound_policy(&self, target: T) -> Result>; +pub trait DiscoverOutboundPolicy { + async fn get_outbound_policy(&self, target: R) -> Result>; - async fn watch_outbound_policy(&self, target: T) -> Result>; + async fn watch_outbound_policy(&self, target: R) -> Result>; + + async fn watch_external_policy(&self) -> ExternalPolicyStream; fn lookup_ip(&self, addr: IpAddr, port: NonZeroU16, source_namespace: String) -> Option; } pub type OutboundPolicyStream = Pin + Send + Sync + 'static>>; +pub type ExternalPolicyStream = Pin + Send + Sync + 'static>>; pub type HttpRoute = OutboundRoute; pub type GrpcRoute = OutboundRoute; -pub type RouteSet = HashMap; -pub struct OutboundDiscoverTarget { - pub service_name: String, - pub service_namespace: String, - pub service_port: NonZeroU16, - pub source_namespace: String, -} +pub type RouteSet = HashMap; -#[derive(Clone, Debug, PartialEq)] -pub struct OutboundPolicy { - pub http_routes: RouteSet, - pub grpc_routes: RouteSet, - pub authority: String, - pub name: String, - pub namespace: String, - pub port: NonZeroU16, - pub opaque: bool, - pub accrual: Option, - pub http_retry: Option>, - pub grpc_retry: Option>, - pub timeouts: RouteTimeouts, +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum TrafficPolicy { + Allow, + Deny, } #[derive(Clone, Debug, PartialEq, Eq)] @@ -56,6 +58,24 @@ pub struct OutboundRoute { pub creation_timestamp: Option>, } +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TlsRoute { + pub hostnames: Vec, + pub rule: TcpRouteRule, + /// This is required for ordering returned routes + /// by their creation timestamp. + pub creation_timestamp: Option>, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TcpRoute { + pub rule: TcpRouteRule, + + /// This is required for ordering returned routes + /// by their creation timestamp. 
+ pub creation_timestamp: Option>, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub struct OutboundRouteRule { pub matches: Vec, @@ -65,10 +85,16 @@ pub struct OutboundRouteRule { pub filters: Vec, } +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TcpRouteRule { + pub backends: Vec, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub enum Backend { Addr(WeightedAddr), Service(WeightedService), + EgressNetwork(WeightedEgressNetwork), Invalid { weight: u32, message: String }, } @@ -90,6 +116,16 @@ pub struct WeightedService { pub exists: bool, } +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct WeightedEgressNetwork { + pub weight: u32, + pub name: String, + pub namespace: String, + pub port: Option, + pub filters: Vec, + pub exists: bool, +} + #[derive(Copy, Clone, Debug, PartialEq)] pub enum FailureAccrual { Consecutive { max_failures: u32, backoff: Backoff }, @@ -138,3 +174,21 @@ pub enum GrpcRetryCondition { Internal, Unavailable, } + +impl Route for OutboundRoute { + fn creation_timestamp(&self) -> Option> { + self.creation_timestamp + } +} + +impl Route for TcpRoute { + fn creation_timestamp(&self) -> Option> { + self.creation_timestamp + } +} + +impl Route for TlsRoute { + fn creation_timestamp(&self) -> Option> { + self.creation_timestamp + } +} diff --git a/policy-controller/core/src/outbound/policy.rs b/policy-controller/core/src/outbound/policy.rs new file mode 100644 index 0000000000000..37c4cbc9e7f6d --- /dev/null +++ b/policy-controller/core/src/outbound/policy.rs @@ -0,0 +1,63 @@ +use super::{ + FailureAccrual, GrpcRetryCondition, GrpcRoute, HttpRetryCondition, HttpRoute, RouteRetry, + RouteSet, RouteTimeouts, TcpRoute, TlsRoute, TrafficPolicy, +}; + +use std::num::NonZeroU16; + +// ParentInfo carries resource-specific information about +// the parent to which outbound policy is associated. 
+#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub enum ParentInfo { + Service { + name: String, + namespace: String, + authority: String, + }, + EgressNetwork { + name: String, + namespace: String, + traffic_policy: TrafficPolicy, + }, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct OutboundPolicy { + pub parent_info: ParentInfo, + pub http_routes: RouteSet, + pub grpc_routes: RouteSet, + pub tls_routes: RouteSet, + pub tcp_routes: RouteSet, + pub port: NonZeroU16, + pub opaque: bool, + pub accrual: Option, + pub http_retry: Option>, + pub grpc_retry: Option>, + pub timeouts: RouteTimeouts, +} + +impl ParentInfo { + pub fn name(&self) -> &str { + match self { + Self::EgressNetwork { name, .. } => name, + Self::Service { name, .. } => name, + } + } + + pub fn namespace(&self) -> &str { + match self { + Self::EgressNetwork { namespace, .. } => namespace, + Self::Service { namespace, .. } => namespace, + } + } +} + +impl OutboundPolicy { + pub fn parent_name(&self) -> &str { + self.parent_info.name() + } + + pub fn parent_namespace(&self) -> &str { + self.parent_info.namespace() + } +} diff --git a/policy-controller/core/src/outbound/target.rs b/policy-controller/core/src/outbound/target.rs new file mode 100644 index 0000000000000..d03cfd870aae9 --- /dev/null +++ b/policy-controller/core/src/outbound/target.rs @@ -0,0 +1,35 @@ +use std::{net::SocketAddr, num::NonZeroU16}; + +/// OutboundDiscoverTarget allows us to express the fact that +/// a policy resolution can be fulfilled by either a resource +/// we know about (a specific EgressNetwork or a Service) or +/// by our fallback mechanism. 
+#[derive(Clone, Debug)] +pub enum OutboundDiscoverTarget { + Resource(ResourceTarget), + External(SocketAddr), +} + +#[derive(Clone, Debug)] +pub struct ResourceTarget { + pub name: String, + pub namespace: String, + pub port: NonZeroU16, + pub source_namespace: String, + pub kind: Kind, +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum Kind { + EgressNetwork(SocketAddr), + Service, +} + +impl ResourceTarget { + pub fn original_dst(&self) -> Option { + match self.kind { + Kind::EgressNetwork(original_dst) => Some(original_dst), + Kind::Service => None, + } + } +} diff --git a/policy-controller/grpc/src/outbound.rs b/policy-controller/grpc/src/outbound.rs index 7a8d69fb1893a..cba161908b4e3 100644 --- a/policy-controller/grpc/src/outbound.rs +++ b/policy-controller/grpc/src/outbound.rs @@ -1,11 +1,11 @@ extern crate http as http_crate; use crate::workload; -use futures::prelude::*; +use futures::{prelude::*, StreamExt}; use http_crate::uri::Authority; use linkerd2_proxy_api::{ - self as api, - meta::{metadata, Metadata}, + self as api, destination, + meta::{metadata, Metadata, Resource}, outbound::{ self, outbound_policies_server::{OutboundPolicies, OutboundPoliciesServer}, @@ -13,15 +13,18 @@ use linkerd2_proxy_api::{ }; use linkerd_policy_controller_core::{ outbound::{ - DiscoverOutboundPolicy, OutboundDiscoverTarget, OutboundPolicy, OutboundPolicyStream, - OutboundRoute, + DiscoverOutboundPolicy, ExternalPolicyStream, Kind, OutboundDiscoverTarget, OutboundPolicy, + OutboundPolicyStream, ParentInfo, ResourceTarget, Route, WeightedEgressNetwork, + WeightedService, }, routes::GroupKindNamespaceName, }; -use std::{num::NonZeroU16, str::FromStr, sync::Arc, time}; +use std::{net::SocketAddr, num::NonZeroU16, str::FromStr, sync::Arc, time}; mod grpc; mod http; +mod tcp; +mod tls; #[derive(Clone, Debug)] pub struct OutboundPolicyServer { @@ -34,7 +37,7 @@ pub struct OutboundPolicyServer { impl OutboundPolicyServer where - T: DiscoverOutboundPolicy + Send 
+ Sync + 'static, + T: DiscoverOutboundPolicy + Send + Sync + 'static, { pub fn new( discover: T, @@ -62,14 +65,15 @@ where let target = match target { outbound::traffic_spec::Target::Addr(target) => target, outbound::traffic_spec::Target::Authority(auth) => { - return self.lookup_authority(&auth).map( - |(service_namespace, service_name, service_port)| OutboundDiscoverTarget { - service_name, - service_namespace, - service_port, + return self.lookup_authority(&auth).map(|(namespace, name, port)| { + OutboundDiscoverTarget::Resource(ResourceTarget { + kind: Kind::Service, + name, + namespace, + port, source_namespace, - }, - ) + }) + }) } }; @@ -139,29 +143,39 @@ where #[async_trait::async_trait] impl OutboundPolicies for OutboundPolicyServer where - T: DiscoverOutboundPolicy + Send + Sync + 'static, + T: DiscoverOutboundPolicy + Send + Sync + 'static, { async fn get( &self, req: tonic::Request, ) -> Result, tonic::Status> { - let service = self.lookup(req.into_inner())?; - - let policy = self - .index - .get_outbound_policy(service) - .await - .map_err(|error| { - tonic::Status::internal(format!("failed to get outbound policy: {error}")) - })?; + let target = self.lookup(req.into_inner())?; + + match target.clone() { + OutboundDiscoverTarget::Resource(resource) => { + let original_dst = resource.original_dst(); + let policy = self + .index + .get_outbound_policy(resource) + .await + .map_err(|error| { + tonic::Status::internal(format!("failed to get outbound policy: {error}")) + })?; + + if let Some(policy) = policy { + Ok(tonic::Response::new(to_proto( + policy, + self.allow_l5d_request_headers, + original_dst, + ))) + } else { + Err(tonic::Status::not_found("No such policy")) + } + } - if let Some(policy) = policy { - Ok(tonic::Response::new(to_service( - policy, - self.allow_l5d_request_headers, - ))) - } else { - Err(tonic::Status::not_found("No such policy")) + OutboundDiscoverTarget::External(original_dst) => { + 
Ok(tonic::Response::new(fallback(original_dst))) + } } } @@ -171,20 +185,35 @@ where &self, req: tonic::Request, ) -> Result, tonic::Status> { - let service = self.lookup(req.into_inner())?; + let target = self.lookup(req.into_inner())?; let drain = self.drain.clone(); - let rx = self - .index - .watch_outbound_policy(service) - .await - .map_err(|e| tonic::Status::internal(format!("lookup failed: {e}")))? - .ok_or_else(|| tonic::Status::not_found("unknown server"))?; - Ok(tonic::Response::new(response_stream( - drain, - rx, - self.allow_l5d_request_headers, - ))) + match target.clone() { + OutboundDiscoverTarget::Resource(resource) => { + let original_dst = resource.original_dst(); + let rx = self + .index + .watch_outbound_policy(resource) + .await + .map_err(|e| tonic::Status::internal(format!("lookup failed: {e}")))? + .ok_or_else(|| tonic::Status::not_found("unknown server"))?; + Ok(tonic::Response::new(response_stream( + drain, + rx, + self.allow_l5d_request_headers, + original_dst, + ))) + } + + OutboundDiscoverTarget::External(original_dst) => { + let rx = self.index.watch_external_policy().await; + Ok(tonic::Response::new(external_stream( + drain, + rx, + original_dst, + ))) + } + } } } @@ -196,6 +225,7 @@ fn response_stream( drain: drain::Watch, mut rx: OutboundPolicyStream, allow_l5d_request_headers: bool, + original_dst: Option, ) -> BoxWatchStream { Box::pin(async_stream::try_stream! { tokio::pin! { @@ -207,7 +237,7 @@ fn response_stream( // When the port is updated with a new server, update the server watch. res = rx.next() => match res { Some(policy) => { - yield to_service(policy, allow_l5d_request_headers); + yield to_proto(policy, allow_l5d_request_headers, original_dst); } None => return, }, @@ -222,18 +252,132 @@ fn response_stream( }) } -fn to_service( - outbound: OutboundPolicy, +fn external_stream( + drain: drain::Watch, + mut rx: ExternalPolicyStream, + original_dst: SocketAddr, +) -> BoxWatchStream { + Box::pin(async_stream::try_stream! 
{ + tokio::pin! { + let shutdown = drain.signaled(); + } + + loop { + tokio::select! { + res = rx.next() => match res { + Some(_) => { + yield fallback(original_dst); + } + None => return, + }, + + // If the server starts shutting down, close the stream so that it doesn't hold the + // server open. + _ = &mut shutdown => { + return; + } + } + } + }) +} + +fn fallback(original_dst: SocketAddr) -> outbound::OutboundPolicy { + // This encoder sets deprecated timeouts for older proxies. + let metadata = Some(Metadata { + kind: Some(metadata::Kind::Default("egress-fallback".to_string())), + }); + + let backend = outbound::Backend { + metadata: metadata.clone(), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: 1, + ..Default::default() + }, + )), + }; + + let opaque = outbound::proxy_protocol::Opaque { + routes: vec![outbound::OpaqueRoute { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Default("egress-fallback".to_string())), + }), + rules: vec![outbound::opaque_route::Rule { + backends: Some(outbound::opaque_route::Distribution { + kind: Some(outbound::opaque_route::distribution::Kind::FirstAvailable( + outbound::opaque_route::distribution::FirstAvailable { + backends: vec![outbound::opaque_route::RouteBackend { + backend: Some(backend.clone()), + invalid: None, + }], + }, + )), + }), + }], + error: None, + }], + }; + + let http_routes = vec![outbound::HttpRoute { + hosts: Vec::default(), + metadata: metadata.clone(), + rules: vec![outbound::http_route::Rule { + backends: Some(outbound::http_route::Distribution { + kind: Some(outbound::http_route::distribution::Kind::FirstAvailable( + outbound::http_route::distribution::FirstAvailable { + backends: vec![outbound::http_route::RouteBackend { + backend: Some(backend), + ..Default::default() + }], + }, + )), + }), + matches: vec![api::http_route::HttpRouteMatch::default()], + filters: Vec::default(), + 
..Default::default() + }], + }]; + + outbound::OutboundPolicy { + metadata, + protocol: Some(outbound::ProxyProtocol { + kind: Some(outbound::proxy_protocol::Kind::Detect( + outbound::proxy_protocol::Detect { + timeout: Some( + time::Duration::from_secs(10) + .try_into() + .expect("failed to convert detect timeout to protobuf"), + ), + opaque: Some(opaque), + http1: Some(outbound::proxy_protocol::Http1 { + routes: http_routes.clone(), + failure_accrual: None, + }), + http2: Some(outbound::proxy_protocol::Http2 { + routes: http_routes, + failure_accrual: None, + }), + }, + )), + }), + } +} + +fn to_proto( + policy: OutboundPolicy, allow_l5d_request_headers: bool, + original_dst: Option, ) -> outbound::OutboundPolicy { - let backend: outbound::Backend = default_backend(&outbound); + let backend: outbound::Backend = default_backend(&policy, original_dst); - let kind = if outbound.opaque { + let kind = if policy.opaque { outbound::proxy_protocol::Kind::Opaque(outbound::proxy_protocol::Opaque { - routes: vec![default_outbound_opaq_route(backend)], + routes: vec![default_outbound_opaq_route(backend, &policy.parent_info)], }) } else { - let accrual = outbound.accrual.map(|accrual| outbound::FailureAccrual { + let accrual = policy.accrual.map(|accrual| outbound::FailureAccrual { kind: Some(match accrual { linkerd_policy_controller_core::outbound::FailureAccrual::Consecutive { max_failures, @@ -251,8 +395,10 @@ fn to_service( }), }); - let mut http_routes = outbound.http_routes.into_iter().collect::>(); - let mut grpc_routes = outbound.grpc_routes.into_iter().collect::>(); + let mut grpc_routes = policy.grpc_routes.clone().into_iter().collect::>(); + let mut http_routes = policy.http_routes.clone().into_iter().collect::>(); + let mut tls_routes = policy.tls_routes.clone().into_iter().collect::>(); + let mut tcp_routes = policy.tcp_routes.clone().into_iter().collect::>(); if !grpc_routes.is_empty() { grpc_routes.sort_by(timestamp_then_name); @@ -260,9 +406,39 @@ fn 
to_service( backend, grpc_routes.into_iter(), accrual, - outbound.grpc_retry, - outbound.timeouts, + policy.grpc_retry.clone(), + policy.timeouts.clone(), + allow_l5d_request_headers, + &policy.parent_info, + original_dst, + ) + } else if !http_routes.is_empty() { + http_routes.sort_by(timestamp_then_name); + http::protocol( + backend, + http_routes.into_iter(), + accrual, + policy.http_retry.clone(), + policy.timeouts.clone(), allow_l5d_request_headers, + &policy.parent_info, + original_dst, + ) + } else if !tls_routes.is_empty() { + tls_routes.sort_by(timestamp_then_name); + tls::protocol( + backend, + tls_routes.into_iter(), + &policy.parent_info, + original_dst, + ) + } else if !tcp_routes.is_empty() { + tcp_routes.sort_by(timestamp_then_name); + tcp::protocol( + backend, + tcp_routes.into_iter(), + &policy.parent_info, + original_dst, ) } else { http_routes.sort_by(timestamp_then_name); @@ -270,20 +446,31 @@ fn to_service( backend, http_routes.into_iter(), accrual, - outbound.http_retry, - outbound.timeouts, + policy.http_retry.clone(), + policy.timeouts.clone(), allow_l5d_request_headers, + &policy.parent_info, + original_dst, ) } }; + let (parent_group, parent_kind, namespace, name) = match policy.parent_info { + ParentInfo::EgressNetwork { + namespace, name, .. + } => ("policy.linkerd.io", "EgressNetwork", namespace, name), + ParentInfo::Service { + name, namespace, .. 
+ } => ("core", "Service", namespace, name), + }; + let metadata = Metadata { kind: Some(metadata::Kind::Resource(api::meta::Resource { - group: "core".to_string(), - kind: "Service".to_string(), - namespace: outbound.namespace, - name: outbound.name, - port: u16::from(outbound.port).into(), + group: parent_group.into(), + kind: parent_kind.into(), + namespace, + name, + port: u16::from(policy.port).into(), ..Default::default() })), }; @@ -294,13 +481,13 @@ fn to_service( } } -fn timestamp_then_name( - (left_id, left_route): &(GroupKindNamespaceName, OutboundRoute), - (right_id, right_route): &(GroupKindNamespaceName, OutboundRoute), +fn timestamp_then_name( + (left_id, left_route): &(GroupKindNamespaceName, R), + (right_id, right_route): &(GroupKindNamespaceName, R), ) -> std::cmp::Ordering { let by_ts = match ( - &left_route.creation_timestamp, - &right_route.creation_timestamp, + &left_route.creation_timestamp(), + &right_route.creation_timestamp(), ) { (Some(left_ts), Some(right_ts)) => left_ts.cmp(right_ts), (None, None) => std::cmp::Ordering::Equal, @@ -312,50 +499,107 @@ fn timestamp_then_name( by_ts.then_with(|| left_id.name.cmp(&right_id.name)) } -fn default_backend(outbound: &OutboundPolicy) -> outbound::Backend { - outbound::Backend { - metadata: Some(Metadata { - kind: Some(metadata::Kind::Resource(api::meta::Resource { - group: "core".to_string(), - kind: "Service".to_string(), - name: outbound.name.clone(), - namespace: outbound.namespace.clone(), - section: Default::default(), - port: u16::from(outbound.port).into(), - })), - }), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Balancer( - outbound::backend::BalanceP2c { - discovery: Some(outbound::backend::EndpointDiscovery { - kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( - outbound::backend::endpoint_discovery::DestinationGet { - path: outbound.authority.clone(), - }, - )), +fn default_backend(policy: &OutboundPolicy, original_dst: Option) -> 
outbound::Backend { + match policy.parent_info.clone() { + ParentInfo::Service { + authority, + namespace, + name, + .. + } => outbound::Backend { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Resource(api::meta::Resource { + group: "core".to_string(), + kind: "Service".to_string(), + name, + namespace, + section: Default::default(), + port: u16::from(policy.port).into(), + })), + }), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Balancer( + outbound::backend::BalanceP2c { + discovery: Some(outbound::backend::EndpointDiscovery { + kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( + outbound::backend::endpoint_discovery::DestinationGet { + path: authority.clone(), + }, + )), + }), + load: Some(default_balancer_config()), + }, + )), + }, + ParentInfo::EgressNetwork { + namespace, name, .. + } => match original_dst { + Some(original_dst) => outbound::Backend { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Resource(api::meta::Resource { + group: "policy.linkerd.io".to_string(), + kind: "EgressNetwork".to_string(), + name, + namespace, + section: Default::default(), + port: u16::from(policy.port).into(), + })), }), - load: Some(default_balancer_config()), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: 1, + ..Default::default() + }, + )), }, - )), + None => { + tracing::error!("no original_dst for Egresspolicy"); + outbound::Backend { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Default("invalid".to_string())), + }), + queue: None, + kind: None, + } + } + }, } } -fn default_outbound_opaq_route(backend: outbound::Backend) -> outbound::OpaqueRoute { - let metadata = Some(Metadata { - kind: Some(metadata::Kind::Default("opaq".to_string())), - }); - let rules = vec![outbound::opaque_route::Rule { - backends: Some(outbound::opaque_route::Distribution { - kind: 
Some(outbound::opaque_route::distribution::Kind::FirstAvailable( - outbound::opaque_route::distribution::FirstAvailable { - backends: vec![outbound::opaque_route::RouteBackend { - backend: Some(backend), - }], - }, - )), - }), - }]; - outbound::OpaqueRoute { metadata, rules } +fn default_outbound_opaq_route( + backend: outbound::Backend, + parent_info: &ParentInfo, +) -> outbound::OpaqueRoute { + match parent_info { + ParentInfo::EgressNetwork { traffic_policy, .. } => { + tcp::default_outbound_egress_route(backend, traffic_policy) + } + ParentInfo::Service { .. } => { + let metadata = Some(Metadata { + kind: Some(metadata::Kind::Default("opaq".to_string())), + }); + let rules = vec![outbound::opaque_route::Rule { + backends: Some(outbound::opaque_route::Distribution { + kind: Some(outbound::opaque_route::distribution::Kind::FirstAvailable( + outbound::opaque_route::distribution::FirstAvailable { + backends: vec![outbound::opaque_route::RouteBackend { + backend: Some(backend), + invalid: None, + }], + }, + )), + }), + }]; + + outbound::OpaqueRoute { + metadata, + rules, + error: None, + } + } + } } fn default_balancer_config() -> outbound::backend::balance_p2c::Load { @@ -395,3 +639,38 @@ pub(crate) fn convert_duration( }) .ok() } + +pub(crate) fn service_meta(svc: WeightedService) -> Metadata { + Metadata { + kind: Some(metadata::Kind::Resource(Resource { + group: "core".to_string(), + kind: "Service".to_string(), + name: svc.name, + namespace: svc.namespace, + section: Default::default(), + port: u16::from(svc.port).into(), + })), + } +} + +pub(crate) fn egress_net_meta( + egress_net: WeightedEgressNetwork, + original_dst_port: Option, +) -> Metadata { + let port = egress_net + .port + .map(NonZeroU16::get) + .or(original_dst_port) + .unwrap_or_default(); + + Metadata { + kind: Some(metadata::Kind::Resource(Resource { + group: "policy.linkerd.io".to_string(), + kind: "EgressNetwork".to_string(), + name: egress_net.name, + namespace: egress_net.namespace, + 
section: Default::default(), + port: port.into(), + })), + } +} diff --git a/policy-controller/grpc/src/outbound/grpc.rs b/policy-controller/grpc/src/outbound/grpc.rs index 472eb714daea0..791191e3eca1f 100644 --- a/policy-controller/grpc/src/outbound/grpc.rs +++ b/policy-controller/grpc/src/outbound/grpc.rs @@ -2,16 +2,21 @@ use super::{convert_duration, default_balancer_config, default_queue_config}; use crate::routes::{ convert_host_match, convert_request_header_modifier_filter, grpc::convert_match, }; -use linkerd2_proxy_api::{destination, grpc_route, http_route, meta, outbound}; +use linkerd2_proxy_api::{ + destination, grpc_route, http_route, + meta::{self}, + outbound, +}; use linkerd_policy_controller_core::{ outbound::{ Backend, Filter, GrpcRetryCondition, GrpcRoute, OutboundRoute, OutboundRouteRule, - RouteRetry, RouteTimeouts, + ParentInfo, RouteRetry, RouteTimeouts, TrafficPolicy, }, routes::{FailureInjectorFilter, GroupKindNamespaceName}, }; use std::{net::SocketAddr, time}; +#[allow(clippy::too_many_arguments)] pub(crate) fn protocol( default_backend: outbound::Backend, routes: impl Iterator, @@ -19,8 +24,10 @@ pub(crate) fn protocol( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::proxy_protocol::Kind { - let routes = routes + let mut routes = routes .map(|(gknn, route)| { convert_outbound_route( gknn, @@ -29,15 +36,28 @@ pub(crate) fn protocol( service_retry.clone(), service_timeouts.clone(), allow_l5d_request_headers, + parent_info, + original_dst, ) }) .collect::>(); + + if let ParentInfo::EgressNetwork { traffic_policy, .. 
} = parent_info { + routes.push(default_outbound_egress_route( + default_backend, + service_retry, + service_timeouts, + traffic_policy, + )); + } + outbound::proxy_protocol::Kind::Grpc(outbound::proxy_protocol::Grpc { routes, failure_accrual, }) } +#[allow(clippy::too_many_arguments)] fn convert_outbound_route( gknn: GroupKindNamespaceName, OutboundRoute { @@ -49,6 +69,8 @@ fn convert_outbound_route( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::GrpcRoute { // This encoder sets deprecated timeouts for older proxies. #![allow(deprecated)] @@ -77,7 +99,7 @@ fn convert_outbound_route( }| { let backends = backends .into_iter() - .map(convert_backend) + .map(|b| convert_backend(b, parent_info, original_dst)) .collect::>(); let dist = if backends.is_empty() { outbound::grpc_route::distribution::Kind::FirstAvailable( @@ -158,7 +180,13 @@ fn convert_outbound_route( } } -fn convert_backend(backend: Backend) -> outbound::grpc_route::WeightedRouteBackend { +fn convert_backend( + backend: Backend, + parent_info: &ParentInfo, + original_dst: Option, +) -> outbound::grpc_route::WeightedRouteBackend { + let original_dst_port = original_dst.map(|o| o.port()); + match backend { Backend::Addr(addr) => { let socket_addr = SocketAddr::new(addr.addr, addr.port.get()); @@ -181,88 +209,226 @@ fn convert_backend(backend: Backend) -> outbound::grpc_route::WeightedRouteBacke }), } } - Backend::Service(svc) => { - if svc.exists { - let filters = svc.filters.into_iter().map(convert_to_filter).collect(); - outbound::grpc_route::WeightedRouteBackend { - weight: svc.weight, - backend: Some(outbound::grpc_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Resource(meta::Resource { - group: "core".to_string(), - kind: "Service".to_string(), - name: svc.name, - namespace: svc.namespace, - section: 
Default::default(), - port: u16::from(svc.port).into(), - })), - }), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Balancer( - outbound::backend::BalanceP2c { - discovery: Some(outbound::backend::EndpointDiscovery { - kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( - outbound::backend::endpoint_discovery::DestinationGet { - path: svc.authority, - }, - )), - }), - load: Some(default_balancer_config()), - }, - )), - }), - filters, - ..Default::default() + Backend::Service(svc) if svc.exists => { + let filters = svc + .filters + .clone() + .into_iter() + .map(convert_to_filter) + .collect(); + outbound::grpc_route::WeightedRouteBackend { + weight: svc.weight, + backend: Some(outbound::grpc_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::service_meta(svc.clone())), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Balancer( + outbound::backend::BalanceP2c { + discovery: Some(outbound::backend::EndpointDiscovery { + kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( + outbound::backend::endpoint_discovery::DestinationGet { + path: svc.authority, + }, + )), + }), + load: Some(default_balancer_config()), + }, + )), }), - } - } else { - outbound::grpc_route::WeightedRouteBackend { - weight: svc.weight, - backend: Some(outbound::grpc_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + filters, + ..Default::default() + }), + } + } + Backend::Service(svc) => invalid_backend( + svc.weight, + format!("Service not found {}", svc.name), + super::service_meta(svc), + ), + Backend::EgressNetwork(egress_net) if egress_net.exists => { + match (parent_info, original_dst) { + ( + ParentInfo::EgressNetwork { + name, namespace, .. 
+ }, + Some(original_dst), + ) => { + if *name == egress_net.name && *namespace == egress_net.namespace { + let filters = egress_net + .filters + .clone() + .into_iter() + .map(convert_to_filter) + .collect(); + + outbound::grpc_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::grpc_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), + }), + filters, + ..Default::default() }), - queue: Some(default_queue_config()), - kind: None, - }), - filters: vec![outbound::grpc_route::Filter { - kind: Some(outbound::grpc_route::filter::Kind::FailureInjector( - grpc_route::GrpcFailureInjector { - code: 500, - message: format!("Service not found {}", svc.name), - ratio: None, - }, - )), - }], - ..Default::default() - }), + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) + } } + (ParentInfo::EgressNetwork { .. }, None) => invalid_backend( + egress_net.weight, + "EgressNetwork can be resolved from an ip:port combo only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + (ParentInfo::Service { .. 
}, _) => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), } } - Backend::Invalid { weight, message } => outbound::grpc_route::WeightedRouteBackend { + Backend::EgressNetwork(egress_net) => invalid_backend( + egress_net.weight, + format!("EgressNetwork not found {}", egress_net.name), + super::egress_net_meta(egress_net, original_dst_port), + ), + Backend::Invalid { weight, message } => invalid_backend( weight, - backend: Some(outbound::grpc_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Default("invalid".to_string())), - }), - queue: Some(default_queue_config()), - kind: None, - }), - filters: vec![outbound::grpc_route::Filter { - kind: Some(outbound::grpc_route::filter::Kind::FailureInjector( - grpc_route::GrpcFailureInjector { - code: 500, - message, - ratio: None, - }, - )), - }], - ..Default::default() + message, + meta::Metadata { + kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + }, + ), + } +} + +fn invalid_backend( + weight: u32, + message: String, + meta: meta::Metadata, +) -> outbound::grpc_route::WeightedRouteBackend { + outbound::grpc_route::WeightedRouteBackend { + weight, + backend: Some(outbound::grpc_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(meta), + queue: Some(default_queue_config()), + kind: None, }), - }, + filters: vec![outbound::grpc_route::Filter { + kind: Some(outbound::grpc_route::filter::Kind::FailureInjector( + grpc_route::GrpcFailureInjector { + code: 500, + message, + ratio: None, + }, + )), + }], + ..Default::default() + }), + } +} + +pub(crate) fn default_outbound_egress_route( + backend: outbound::Backend, + service_retry: Option>, + service_timeouts: RouteTimeouts, + traffic_policy: &TrafficPolicy, +) -> outbound::GrpcRoute { + #![allow(deprecated)] + let (filters, name) = 
match traffic_policy { + TrafficPolicy::Allow => (Vec::default(), "grpc-egress-allow"), + TrafficPolicy::Deny => ( + vec![outbound::grpc_route::Filter { + kind: Some(outbound::grpc_route::filter::Kind::FailureInjector( + grpc_route::GrpcFailureInjector { + code: 7, + message: "traffic not allowed".to_string(), + ratio: None, + }, + )), + }], + "grpc-egress-deny", + ), + }; + + // This encoder sets deprecated timeouts for older proxies. + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Default(name.to_string())), + }); + let rules = vec![outbound::grpc_route::Rule { + matches: vec![grpc_route::GrpcRouteMatch::default()], + backends: Some(outbound::grpc_route::Distribution { + kind: Some(outbound::grpc_route::distribution::Kind::FirstAvailable( + outbound::grpc_route::distribution::FirstAvailable { + backends: vec![outbound::grpc_route::RouteBackend { + backend: Some(backend), + ..Default::default() + }], + }, + )), + }), + request_timeout: service_timeouts + .request + .and_then(|d| convert_duration("request timeout", d)), + timeouts: Some(http_route::Timeouts { + request: service_timeouts + .request + .and_then(|d| convert_duration("stream timeout", d)), + idle: service_timeouts + .idle + .and_then(|d| convert_duration("idle timeout", d)), + response: service_timeouts + .response + .and_then(|d| convert_duration("response timeout", d)), + }), + retry: service_retry.map(|r| outbound::grpc_route::Retry { + max_retries: r.limit.into(), + max_request_bytes: 64 * 1024, + backoff: Some(outbound::ExponentialBackoff { + min_backoff: Some(time::Duration::from_millis(25).try_into().unwrap()), + max_backoff: Some(time::Duration::from_millis(250).try_into().unwrap()), + jitter_ratio: 1.0, + }), + conditions: Some(r.conditions.iter().flatten().fold( + outbound::grpc_route::retry::Conditions::default(), + |mut cond, c| { + match c { + GrpcRetryCondition::Cancelled => cond.cancelled = true, + GrpcRetryCondition::DeadlineExceeded => cond.deadine_exceeded = 
true, + GrpcRetryCondition::Internal => cond.internal = true, + GrpcRetryCondition::ResourceExhausted => cond.resource_exhausted = true, + GrpcRetryCondition::Unavailable => cond.unavailable = true, + }; + cond + }, + )), + timeout: r.timeout.and_then(|d| convert_duration("retry timeout", d)), + }), + filters, + ..Default::default() + }]; + outbound::GrpcRoute { + metadata, + rules, + ..Default::default() } } diff --git a/policy-controller/grpc/src/outbound/http.rs b/policy-controller/grpc/src/outbound/http.rs index 75410081e1e8e..ad461a0aeb07f 100644 --- a/policy-controller/grpc/src/outbound/http.rs +++ b/policy-controller/grpc/src/outbound/http.rs @@ -9,13 +9,14 @@ use crate::routes::{ use linkerd2_proxy_api::{destination, http_route, meta, outbound}; use linkerd_policy_controller_core::{ outbound::{ - Backend, Filter, HttpRetryCondition, HttpRoute, OutboundRouteRule, RouteRetry, - RouteTimeouts, + Backend, Filter, HttpRetryCondition, HttpRoute, OutboundRouteRule, ParentInfo, RouteRetry, + RouteTimeouts, TrafficPolicy, }, routes::GroupKindNamespaceName, }; use std::{net::SocketAddr, time}; +#[allow(clippy::too_many_arguments)] pub(crate) fn protocol( default_backend: outbound::Backend, routes: impl Iterator, @@ -23,8 +24,10 @@ pub(crate) fn protocol( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::proxy_protocol::Kind { - let opaque_route = default_outbound_opaq_route(default_backend.clone()); + let opaque_route = default_outbound_opaq_route(default_backend.clone(), parent_info); let mut routes = routes .map(|(gknn, route)| { convert_outbound_route( @@ -34,16 +37,32 @@ pub(crate) fn protocol( service_retry.clone(), service_timeouts.clone(), allow_l5d_request_headers, + parent_info, + original_dst, ) }) .collect::>(); - if routes.is_empty() { - routes.push(default_outbound_route( - default_backend, - service_retry.clone(), - service_timeouts.clone(), - )); 
+ + match parent_info { + ParentInfo::Service { .. } => { + if routes.is_empty() { + routes.push(default_outbound_service_route( + default_backend, + service_retry.clone(), + service_timeouts.clone(), + )); + } + } + ParentInfo::EgressNetwork { traffic_policy, .. } => { + routes.push(default_outbound_egress_route( + default_backend, + service_retry.clone(), + service_timeouts.clone(), + traffic_policy, + )); + } } + outbound::proxy_protocol::Kind::Detect(outbound::proxy_protocol::Detect { timeout: Some( time::Duration::from_secs(10) @@ -65,6 +84,7 @@ pub(crate) fn protocol( }) } +#[allow(clippy::too_many_arguments)] fn convert_outbound_route( gknn: GroupKindNamespaceName, HttpRoute { @@ -76,10 +96,11 @@ fn convert_outbound_route( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::HttpRoute { // This encoder sets deprecated timeouts for older proxies. #![allow(deprecated)] - let metadata = Some(meta::Metadata { kind: Some(meta::metadata::Kind::Resource(meta::Resource { group: gknn.group.to_string(), @@ -104,7 +125,7 @@ fn convert_outbound_route( }| { let backends = backends .into_iter() - .map(convert_backend) + .map(|b| convert_backend(b, parent_info, original_dst)) .collect::>(); let dist = if backends.is_empty() { outbound::http_route::distribution::Kind::FirstAvailable( @@ -149,7 +170,12 @@ fn convert_outbound_route( } } -fn convert_backend(backend: Backend) -> outbound::http_route::WeightedRouteBackend { +fn convert_backend( + backend: Backend, + parent_info: &ParentInfo, + original_dst: Option, +) -> outbound::http_route::WeightedRouteBackend { + let original_dst_port = original_dst.map(|o| o.port()); match backend { Backend::Addr(addr) => { let socket_addr = SocketAddr::new(addr.addr, addr.port.get()); @@ -172,92 +198,144 @@ fn convert_backend(backend: Backend) -> outbound::http_route::WeightedRouteBacke }), } } - Backend::Service(svc) => { - if 
svc.exists { - let filters = svc.filters.into_iter().map(convert_to_filter).collect(); - outbound::http_route::WeightedRouteBackend { - weight: svc.weight, - backend: Some(outbound::http_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Resource(meta::Resource { - group: "core".to_string(), - kind: "Service".to_string(), - name: svc.name, - namespace: svc.namespace, - section: Default::default(), - port: u16::from(svc.port).into(), - })), - }), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Balancer( - outbound::backend::BalanceP2c { - discovery: Some(outbound::backend::EndpointDiscovery { - kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( - outbound::backend::endpoint_discovery::DestinationGet { - path: svc.authority, - }, - )), - }), - load: Some(default_balancer_config()), - }, - )), - }), - filters, - ..Default::default() + Backend::Service(svc) if svc.exists => { + let filters = svc + .filters + .clone() + .into_iter() + .map(convert_to_filter) + .collect(); + outbound::http_route::WeightedRouteBackend { + weight: svc.weight, + backend: Some(outbound::http_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::service_meta(svc.clone())), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Balancer( + outbound::backend::BalanceP2c { + discovery: Some(outbound::backend::EndpointDiscovery { + kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( + outbound::backend::endpoint_discovery::DestinationGet { + path: svc.authority, + }, + )), + }), + load: Some(default_balancer_config()), + }, + )), }), - } - } else { - outbound::http_route::WeightedRouteBackend { - weight: svc.weight, - backend: Some(outbound::http_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + filters, + 
..Default::default() + }), + } + } + Backend::Service(svc) => invalid_backend( + svc.weight, + format!("Service not found {}", svc.name), + super::service_meta(svc), + ), + Backend::EgressNetwork(egress_net) if egress_net.exists => { + match (parent_info, original_dst) { + ( + ParentInfo::EgressNetwork { + name, namespace, .. + }, + Some(original_dst), + ) => { + if *name == egress_net.name && *namespace == egress_net.namespace { + let filters = egress_net + .filters + .clone() + .into_iter() + .map(convert_to_filter) + .collect(); + + outbound::http_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::http_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), + }), + filters, + ..Default::default() }), - queue: Some(default_queue_config()), - kind: None, - }), - filters: vec![outbound::http_route::Filter { - kind: Some(outbound::http_route::filter::Kind::FailureInjector( - http_route::HttpFailureInjector { - status: 500, - message: format!("Service not found {}", svc.name), - ratio: None, - }, - )), - }], - ..Default::default() - }), + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) + } } + (ParentInfo::EgressNetwork { .. }, None) => invalid_backend( + egress_net.weight, + "EgressNetwork can be resolved from an ip:port combo only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + (ParentInfo::Service { .. 
}, _) => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), } } - Backend::Invalid { weight, message } => outbound::http_route::WeightedRouteBackend { + Backend::EgressNetwork(egress_net) => invalid_backend( + egress_net.weight, + format!("EgressNetwork not found {}", egress_net.name), + super::egress_net_meta(egress_net, original_dst_port), + ), + Backend::Invalid { weight, message } => invalid_backend( weight, - backend: Some(outbound::http_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Default("invalid".to_string())), - }), - queue: Some(default_queue_config()), - kind: None, - }), - filters: vec![outbound::http_route::Filter { - kind: Some(outbound::http_route::filter::Kind::FailureInjector( - http_route::HttpFailureInjector { - status: 500, - message, - ratio: None, - }, - )), - }], - ..Default::default() + message, + meta::Metadata { + kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + }, + ), + } +} + +fn invalid_backend( + weight: u32, + message: String, + meta: meta::Metadata, +) -> outbound::http_route::WeightedRouteBackend { + outbound::http_route::WeightedRouteBackend { + weight, + backend: Some(outbound::http_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(meta), + queue: Some(default_queue_config()), + kind: None, }), - }, + filters: vec![outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::FailureInjector( + http_route::HttpFailureInjector { + status: 500, + message, + ratio: None, + }, + )), + }], + ..Default::default() + }), } } -pub(crate) fn default_outbound_route( +pub(crate) fn default_outbound_service_route( backend: outbound::Backend, service_retry: Option>, service_timeouts: RouteTimeouts, @@ -299,6 +377,65 @@ pub(crate) fn default_outbound_route( } } +pub(crate) fn 
default_outbound_egress_route( + backend: outbound::Backend, + service_retry: Option>, + service_timeouts: RouteTimeouts, + traffic_policy: &TrafficPolicy, +) -> outbound::HttpRoute { + #![allow(deprecated)] + let (filters, name) = match traffic_policy { + TrafficPolicy::Allow => (Vec::default(), "http-egress-allow"), + TrafficPolicy::Deny => ( + vec![outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::FailureInjector( + http_route::HttpFailureInjector { + status: 403, + message: "traffic not allowed".to_string(), + ratio: None, + }, + )), + }], + "http-egress-deny", + ), + }; + + // This encoder sets deprecated timeouts for older proxies. + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Default(name.to_string())), + }); + let rules = vec![outbound::http_route::Rule { + matches: vec![http_route::HttpRouteMatch { + path: Some(http_route::PathMatch { + kind: Some(http_route::path_match::Kind::Prefix("/".to_string())), + }), + ..Default::default() + }], + backends: Some(outbound::http_route::Distribution { + kind: Some(outbound::http_route::distribution::Kind::FirstAvailable( + outbound::http_route::distribution::FirstAvailable { + backends: vec![outbound::http_route::RouteBackend { + backend: Some(backend), + ..Default::default() + }], + }, + )), + }), + retry: service_retry.map(convert_retry), + request_timeout: service_timeouts + .request + .and_then(|d| convert_duration("request timeout", d)), + timeouts: Some(convert_timeouts(service_timeouts)), + filters, + ..Default::default() + }]; + outbound::HttpRoute { + metadata, + rules, + ..Default::default() + } +} + fn convert_to_filter(filter: Filter) -> outbound::http_route::Filter { use outbound::http_route::filter::Kind; diff --git a/policy-controller/grpc/src/outbound/tcp.rs b/policy-controller/grpc/src/outbound/tcp.rs new file mode 100644 index 0000000000000..50821364d9207 --- /dev/null +++ b/policy-controller/grpc/src/outbound/tcp.rs @@ -0,0 +1,262 @@ +use 
super::{default_balancer_config, default_queue_config}; +use linkerd2_proxy_api::{destination, meta, outbound}; +use linkerd_policy_controller_core::{ + outbound::{Backend, ParentInfo, TcpRoute, TrafficPolicy}, + routes::GroupKindNamespaceName, +}; +use std::net::SocketAddr; + +pub(crate) fn protocol( + default_backend: outbound::Backend, + routes: impl Iterator, + parent_info: &ParentInfo, + original_dst: Option, +) -> outbound::proxy_protocol::Kind { + let mut routes = routes + .map(|(gknn, route)| { + convert_outbound_route( + gknn, + route, + default_backend.clone(), + parent_info, + original_dst, + ) + }) + .collect::>(); + + if let ParentInfo::EgressNetwork { traffic_policy, .. } = parent_info { + routes.push(default_outbound_egress_route( + default_backend, + traffic_policy, + )); + } + + outbound::proxy_protocol::Kind::Opaque(outbound::proxy_protocol::Opaque { routes }) +} + +fn convert_outbound_route( + gknn: GroupKindNamespaceName, + TcpRoute { + rule, + creation_timestamp: _, + }: TcpRoute, + backend: outbound::Backend, + parent_info: &ParentInfo, + original_dst: Option, +) -> outbound::OpaqueRoute { + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Resource(meta::Resource { + group: gknn.group.to_string(), + kind: gknn.kind.to_string(), + namespace: gknn.namespace.to_string(), + name: gknn.name.to_string(), + ..Default::default() + })), + }); + + let backends = rule + .backends + .into_iter() + .map(|b| convert_backend(b, parent_info, original_dst)) + .collect::>(); + + let dist = if backends.is_empty() { + outbound::opaque_route::distribution::Kind::FirstAvailable( + outbound::opaque_route::distribution::FirstAvailable { + backends: vec![outbound::opaque_route::RouteBackend { + backend: Some(backend.clone()), + invalid: None, + }], + }, + ) + } else { + outbound::opaque_route::distribution::Kind::RandomAvailable( + outbound::opaque_route::distribution::RandomAvailable { backends }, + ) + }; + + let rules = 
vec![outbound::opaque_route::Rule { + backends: Some(outbound::opaque_route::Distribution { kind: Some(dist) }), + }]; + + outbound::OpaqueRoute { + metadata, + rules, + error: None, + } +} + +fn convert_backend( + backend: Backend, + parent_info: &ParentInfo, + original_dst: Option, +) -> outbound::opaque_route::WeightedRouteBackend { + let original_dst_port = original_dst.map(|o| o.port()); + + match backend { + Backend::Addr(addr) => { + let socket_addr = SocketAddr::new(addr.addr, addr.port.get()); + outbound::opaque_route::WeightedRouteBackend { + weight: addr.weight, + backend: Some(outbound::opaque_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: None, + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(socket_addr.into()), + weight: addr.weight, + ..Default::default() + }, + )), + }), + invalid: None, + }), + } + } + Backend::Service(svc) if svc.exists => outbound::opaque_route::WeightedRouteBackend { + weight: svc.weight, + backend: Some(outbound::opaque_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::service_meta(svc.clone())), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Balancer( + outbound::backend::BalanceP2c { + discovery: Some(outbound::backend::EndpointDiscovery { + kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( + outbound::backend::endpoint_discovery::DestinationGet { + path: svc.authority, + }, + )), + }), + load: Some(default_balancer_config()), + }, + )), + }), + invalid: None, + }), + }, + Backend::Service(svc) => invalid_backend( + svc.weight, + format!("Service not found {}", svc.name), + super::service_meta(svc), + ), + Backend::EgressNetwork(egress_net) if egress_net.exists => { + match (parent_info, original_dst) { + ( + ParentInfo::EgressNetwork { + name, namespace, .. 
+ }, + Some(original_dst), + ) => { + if *name == egress_net.name && *namespace == egress_net.namespace { + outbound::opaque_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::opaque_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), + }), + invalid: None, + }), + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) + } + } + (ParentInfo::EgressNetwork { .. }, None) => invalid_backend( + egress_net.weight, + "EgressNetwork can be resolved from an ip:port combo only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + (ParentInfo::Service { .. 
}, _) => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + } + } + Backend::EgressNetwork(egress_net) => invalid_backend( + egress_net.weight, + format!("EgressNetwork not found {}", egress_net.name), + super::egress_net_meta(egress_net, original_dst_port), + ), + Backend::Invalid { weight, message } => invalid_backend( + weight, + message, + meta::Metadata { + kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + }, + ), + } +} + +fn invalid_backend( + weight: u32, + message: String, + meta: meta::Metadata, +) -> outbound::opaque_route::WeightedRouteBackend { + outbound::opaque_route::WeightedRouteBackend { + weight, + backend: Some(outbound::opaque_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(meta), + queue: Some(default_queue_config()), + kind: None, + }), + invalid: Some(outbound::opaque_route::route_backend::Invalid { message }), + }), + } +} + +pub(crate) fn default_outbound_egress_route( + backend: outbound::Backend, + traffic_policy: &TrafficPolicy, +) -> outbound::OpaqueRoute { + let (error, name) = match traffic_policy { + TrafficPolicy::Allow => (None, "tcp-egress-allow"), + TrafficPolicy::Deny => ( + Some(outbound::opaque_route::RouteError { + kind: outbound::opaque_route::route_error::Kind::Forbidden as i32, + }), + "tcp-egress-deny", + ), + }; + + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Default(name.to_string())), + }); + let rules = vec![outbound::opaque_route::Rule { + backends: Some(outbound::opaque_route::Distribution { + kind: Some(outbound::opaque_route::distribution::Kind::FirstAvailable( + outbound::opaque_route::distribution::FirstAvailable { + backends: vec![outbound::opaque_route::RouteBackend { + backend: Some(backend), + invalid: None, + }], + }, + )), + }), + }]; + outbound::OpaqueRoute { + metadata, + rules, + error, + } +} diff --git 
a/policy-controller/grpc/src/outbound/tls.rs b/policy-controller/grpc/src/outbound/tls.rs new file mode 100644 index 0000000000000..a49d756c51863 --- /dev/null +++ b/policy-controller/grpc/src/outbound/tls.rs @@ -0,0 +1,268 @@ +use super::{default_balancer_config, default_queue_config}; +use crate::routes::convert_sni_match; +use linkerd2_proxy_api::{destination, meta, outbound}; +use linkerd_policy_controller_core::{ + outbound::{Backend, ParentInfo, TlsRoute, TrafficPolicy}, + routes::GroupKindNamespaceName, +}; +use std::net::SocketAddr; + +pub(crate) fn protocol( + default_backend: outbound::Backend, + routes: impl Iterator, + parent_info: &ParentInfo, + original_dst: Option, +) -> outbound::proxy_protocol::Kind { + let mut routes = routes + .map(|(gknn, route)| { + convert_outbound_route( + gknn, + route, + default_backend.clone(), + parent_info, + original_dst, + ) + }) + .collect::>(); + + if let ParentInfo::EgressNetwork { traffic_policy, .. } = parent_info { + routes.push(default_outbound_egress_route( + default_backend, + traffic_policy, + )); + } + + outbound::proxy_protocol::Kind::Tls(outbound::proxy_protocol::Tls { routes }) +} + +fn convert_outbound_route( + gknn: GroupKindNamespaceName, + TlsRoute { + hostnames, + rule, + creation_timestamp: _, + }: TlsRoute, + backend: outbound::Backend, + parent_info: &ParentInfo, + original_dst: Option, +) -> outbound::TlsRoute { + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Resource(meta::Resource { + group: gknn.group.to_string(), + kind: gknn.kind.to_string(), + namespace: gknn.namespace.to_string(), + name: gknn.name.to_string(), + ..Default::default() + })), + }); + + let snis = hostnames.into_iter().map(convert_sni_match).collect(); + + let backends = rule + .backends + .into_iter() + .map(|b| convert_backend(b, parent_info, original_dst)) + .collect::>(); + + let dist = if backends.is_empty() { + outbound::tls_route::distribution::Kind::FirstAvailable( + 
outbound::tls_route::distribution::FirstAvailable { + backends: vec![outbound::tls_route::RouteBackend { + backend: Some(backend.clone()), + invalid: None, + }], + }, + ) + } else { + outbound::tls_route::distribution::Kind::RandomAvailable( + outbound::tls_route::distribution::RandomAvailable { backends }, + ) + }; + + let rules = vec![outbound::tls_route::Rule { + backends: Some(outbound::tls_route::Distribution { kind: Some(dist) }), + }]; + + outbound::TlsRoute { + metadata, + snis, + rules, + error: None, + } +} + +fn convert_backend( + backend: Backend, + parent_info: &ParentInfo, + original_dst: Option, +) -> outbound::tls_route::WeightedRouteBackend { + let original_dst_port = original_dst.map(|o| o.port()); + + match backend { + Backend::Addr(addr) => { + let socket_addr = SocketAddr::new(addr.addr, addr.port.get()); + outbound::tls_route::WeightedRouteBackend { + weight: addr.weight, + backend: Some(outbound::tls_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: None, + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(socket_addr.into()), + weight: addr.weight, + ..Default::default() + }, + )), + }), + invalid: None, + }), + } + } + Backend::Service(svc) if svc.exists => outbound::tls_route::WeightedRouteBackend { + weight: svc.weight, + backend: Some(outbound::tls_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::service_meta(svc.clone())), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Balancer( + outbound::backend::BalanceP2c { + discovery: Some(outbound::backend::EndpointDiscovery { + kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( + outbound::backend::endpoint_discovery::DestinationGet { + path: svc.authority, + }, + )), + }), + load: Some(default_balancer_config()), + }, + )), + }), + invalid: None, + }), + }, + Backend::Service(svc) => invalid_backend( + svc.weight, + 
format!("Service not found {}", svc.name), + super::service_meta(svc), + ), + Backend::EgressNetwork(egress_net) if egress_net.exists => { + match (parent_info, original_dst) { + ( + ParentInfo::EgressNetwork { + name, namespace, .. + }, + Some(original_dst), + ) => { + if *name == egress_net.name && *namespace == egress_net.namespace { + outbound::tls_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::tls_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), + }), + invalid: None, + }), + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) + } + } + (ParentInfo::EgressNetwork { .. }, None) => invalid_backend( + egress_net.weight, + "EgressNetwork can be resolved from an ip:port combo only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + (ParentInfo::Service { .. 
}, _) => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + } + } + Backend::EgressNetwork(egress_net) => invalid_backend( + egress_net.weight, + format!("EgressNetwork not found {}", egress_net.name), + super::egress_net_meta(egress_net, original_dst_port), + ), + Backend::Invalid { weight, message } => invalid_backend( + weight, + message, + meta::Metadata { + kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + }, + ), + } +} + +fn invalid_backend( + weight: u32, + message: String, + meta: meta::Metadata, +) -> outbound::tls_route::WeightedRouteBackend { + outbound::tls_route::WeightedRouteBackend { + weight, + backend: Some(outbound::tls_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(meta), + queue: Some(default_queue_config()), + kind: None, + }), + invalid: Some(outbound::tls_route::route_backend::Invalid { message }), + }), + } +} + +pub(crate) fn default_outbound_egress_route( + backend: outbound::Backend, + traffic_policy: &TrafficPolicy, +) -> outbound::TlsRoute { + let (error, name) = match traffic_policy { + TrafficPolicy::Allow => (None, "tls-egress-allow"), + TrafficPolicy::Deny => ( + Some(outbound::tls_route::RouteError { + kind: outbound::tls_route::route_error::Kind::Forbidden as i32, + }), + "tls-egress-deny", + ), + }; + + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Default(name.to_string())), + }); + let rules = vec![outbound::tls_route::Rule { + backends: Some(outbound::tls_route::Distribution { + kind: Some(outbound::tls_route::distribution::Kind::FirstAvailable( + outbound::tls_route::distribution::FirstAvailable { + backends: vec![outbound::tls_route::RouteBackend { + backend: Some(backend), + invalid: None, + }], + }, + )), + }), + }]; + outbound::TlsRoute { + metadata, + rules, + error, + ..Default::default() + } +} diff --git 
a/policy-controller/grpc/src/routes.rs b/policy-controller/grpc/src/routes.rs index 86d679cacb305..b960c6cebb18b 100644 --- a/policy-controller/grpc/src/routes.rs +++ b/policy-controller/grpc/src/routes.rs @@ -1,4 +1,4 @@ -use linkerd2_proxy_api::{http_route as proto, http_types}; +use linkerd2_proxy_api::{http_route as proto, http_types, tls_route as tls_proto}; use linkerd_policy_controller_core::routes::{ HeaderModifierFilter, HostMatch, PathModifier, RequestRedirectFilter, }; @@ -19,6 +19,19 @@ pub(crate) fn convert_host_match(h: HostMatch) -> proto::HostMatch { } } +pub(crate) fn convert_sni_match(h: HostMatch) -> tls_proto::SniMatch { + tls_proto::SniMatch { + r#match: Some(match h { + HostMatch::Exact(host) => tls_proto::sni_match::Match::Exact(host), + HostMatch::Suffix { reverse_labels } => { + tls_proto::sni_match::Match::Suffix(tls_proto::sni_match::Suffix { + reverse_labels: reverse_labels.to_vec(), + }) + } + }), + } +} + pub(crate) fn convert_request_header_modifier_filter( HeaderModifierFilter { add, set, remove }: HeaderModifierFilter, ) -> proto::RequestHeaderModifier { diff --git a/policy-controller/k8s/api/src/policy/network.rs b/policy-controller/k8s/api/src/policy/network.rs index 7448664e50ecc..d0da7ea8be0e0 100644 --- a/policy-controller/k8s/api/src/policy/network.rs +++ b/policy-controller/k8s/api/src/policy/network.rs @@ -1,3 +1,5 @@ +use std::net::IpAddr; + use ipnet::IpNet; #[derive( @@ -19,6 +21,26 @@ impl Network { intersect && !cidr_is_exception } + + #[inline] + pub fn contains(&self, addr: IpAddr) -> bool { + let addr = Cidr::Addr(addr); + let addr_is_exception = self.except.iter().flatten().any(|ex| ex.contains(&addr)); + if addr_is_exception { + return false; + } + + self.cidr.contains(&addr) + } + + /// Returns the size of this Network. The size is the + /// cidr size - the sum of the exception sizes. We assume + /// that exceptions do not overlap. 
+ #[inline] + pub fn block_size(&self) -> usize { + let except_size: usize = self.except.iter().flatten().map(|c| c.block_size()).sum(); + self.cidr.block_size() - except_size + } } #[derive( @@ -55,6 +77,17 @@ impl Cidr { Self::Net(IpNet::V6(_)) => true, } } + + /// Returns the size of this CIDR block. + /// + /// Returns `1` if this represents a single address. + #[inline] + pub fn block_size(&self) -> usize { + match self { + Cidr::Net(net) => net.hosts().count(), + Cidr::Addr(_) => 1, + } + } } impl std::str::FromStr for Cidr { diff --git a/policy-controller/k8s/index/Cargo.toml b/policy-controller/k8s/index/Cargo.toml index cf074fc3343d1..b123401611677 100644 --- a/policy-controller/k8s/index/Cargo.toml +++ b/policy-controller/k8s/index/Cargo.toml @@ -8,6 +8,7 @@ publish = false [dependencies] ahash = "0.8" anyhow = "1" +chrono = { version = "0.4.38", default-features = false } futures = { version = "0.3", default-features = false } http = "0.2" kube = { version = "0.87.1", default-features = false, features = [ diff --git a/policy-controller/k8s/index/src/inbound/index/grpc.rs b/policy-controller/k8s/index/src/inbound/index/grpc.rs index 7908816ac1a39..cd020a03563f4 100644 --- a/policy-controller/k8s/index/src/inbound/index/grpc.rs +++ b/policy-controller/k8s/index/src/inbound/index/grpc.rs @@ -19,7 +19,7 @@ impl TryFrom for RouteBinding { .hostnames .into_iter() .flatten() - .map(crate::routes::http::host_match) + .map(crate::routes::host_match) .collect(); let rules = route diff --git a/policy-controller/k8s/index/src/inbound/index/http.rs b/policy-controller/k8s/index/src/inbound/index/http.rs index fa52a1a699aa3..4b9fa2a82cd5d 100644 --- a/policy-controller/k8s/index/src/inbound/index/http.rs +++ b/policy-controller/k8s/index/src/inbound/index/http.rs @@ -20,7 +20,7 @@ impl TryFrom for RouteBinding { .hostnames .into_iter() .flatten() - .map(crate::routes::http::host_match) + .map(crate::routes::host_match) .collect(); let rules = route @@ -66,7 +66,7 @@ 
impl TryFrom for RouteBinding { .hostnames .into_iter() .flatten() - .map(crate::routes::http::host_match) + .map(crate::routes::host_match) .collect(); let rules = route diff --git a/policy-controller/k8s/index/src/outbound.rs b/policy-controller/k8s/index/src/outbound.rs index eeffd5a10e107..ff51cea36fefe 100644 --- a/policy-controller/k8s/index/src/outbound.rs +++ b/policy-controller/k8s/index/src/outbound.rs @@ -1,6 +1,6 @@ pub mod index; -pub use index::{metrics, Index, ServiceRef, SharedIndex}; +pub use index::{metrics, Index, ResourceRef, SharedIndex}; #[cfg(test)] mod tests; diff --git a/policy-controller/k8s/index/src/outbound/index.rs b/policy-controller/k8s/index/src/outbound/index.rs index e84e00122532c..553809012828e 100644 --- a/policy-controller/k8s/index/src/outbound/index.rs +++ b/policy-controller/k8s/index/src/outbound/index.rs @@ -5,36 +5,59 @@ use crate::{ }; use ahash::AHashMap as HashMap; use anyhow::{bail, ensure, Result}; +use egress_network::EgressNetwork; use linkerd_policy_controller_core::{ outbound::{ Backend, Backoff, FailureAccrual, GrpcRetryCondition, GrpcRoute, HttpRetryCondition, - HttpRoute, OutboundPolicy, RouteRetry, RouteSet, RouteTimeouts, + HttpRoute, Kind, OutboundPolicy, ParentInfo, ResourceTarget, RouteRetry, RouteSet, + RouteTimeouts, TcpRoute, TlsRoute, TrafficPolicy, }, routes::GroupKindNamespaceName, }; use linkerd_policy_controller_k8s_api::{ gateway::{self as k8s_gateway_api, ParentReference}, - policy as linkerd_k8s_api, ResourceExt, Service, + policy::{self as linkerd_k8s_api, Cidr}, + ResourceExt, Service, }; use parking_lot::RwLock; use std::{hash::Hash, net::IpAddr, num::NonZeroU16, sync::Arc, time}; use tokio::sync::watch; +#[allow(dead_code)] #[derive(Debug)] pub struct Index { namespaces: NamespaceIndex, - services_by_ip: HashMap, - service_info: HashMap, + services_by_ip: HashMap, + egress_networks_by_ref: HashMap, + // holds information about resources. 
currently EgressNetworks and Services + resource_info: HashMap, + cluster_networks: Vec, + + // holds a no-op sender to which all clients that have been returned + // a Fallback policy are subscribed. It is used to force these clients + // to reconnect and obtain new policy once the current one may no longer + // be valid + fallback_polcy_tx: watch::Sender<()>, } +pub mod egress_network; pub mod grpc; pub mod http; pub mod metrics; +pub mod tcp; +pub(crate) mod tls; pub type SharedIndex = Arc>; #[derive(Debug, Clone, Hash, PartialEq, Eq)] -pub struct ServiceRef { +pub enum ResourceKind { + EgressNetwork, + Service, +} + +#[derive(Debug, Clone, Hash, PartialEq, Eq)] +pub struct ResourceRef { + pub kind: ResourceKind, pub name: String, pub namespace: String, } @@ -48,38 +71,43 @@ struct NamespaceIndex { #[derive(Debug)] struct Namespace { - /// Stores an observable handle for each known service:port, + /// Stores an observable handle for each known resource:port, /// as well as any route resources in the cluster that specify /// a port. - service_port_routes: HashMap, + resource_port_routes: HashMap, /// Stores the route resources (by service name) that do not - explicitly target a port.
These are only valid for Service + /// as EgressNetworks cannot be parents without an explicit + /// port declaration service_http_routes: HashMap>, service_grpc_routes: HashMap>, + service_tls_routes: HashMap>, + service_tcp_routes: HashMap>, namespace: Arc, } -#[derive(Debug, Default)] -struct ServiceInfo { +#[derive(Debug)] +struct ResourceInfo { opaque_ports: PortSet, accrual: Option, http_retry: Option>, grpc_retry: Option>, timeouts: RouteTimeouts, + traffic_policy: Option, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] -struct ServicePort { - service: String, +struct ResourcePort { + kind: ResourceKind, + name: String, port: NonZeroU16, } #[derive(Debug)] -struct ServiceRoutes { +struct ResourceRoutes { + parent_info: ParentInfo, namespace: Arc, - name: String, port: NonZeroU16, - authority: String, watches_by_ns: HashMap, opaque: bool, accrual: Option, @@ -90,6 +118,7 @@ struct ServiceRoutes { #[derive(Debug)] struct RoutesWatch { + parent_info: ParentInfo, opaque: bool, accrual: Option, http_retry: Option>, @@ -97,6 +126,8 @@ struct RoutesWatch { timeouts: RouteTimeouts, http_routes: RouteSet, grpc_routes: RouteSet, + tls_routes: RouteSet, + tcp_routes: RouteSet, watch: watch::Sender, } @@ -146,6 +177,36 @@ impl kubert::index::IndexNamespacedResource for Inde } } +impl kubert::index::IndexNamespacedResource for Index { + fn apply(&mut self, route: k8s_gateway_api::TlsRoute) { + self.apply_tls(route) + } + + fn delete(&mut self, namespace: String, name: String) { + let gknn = name + .gkn::() + .namespaced(namespace); + for ns_index in self.namespaces.by_ns.values_mut() { + ns_index.delete_tls_route(&gknn); + } + } +} + +impl kubert::index::IndexNamespacedResource for Index { + fn apply(&mut self, route: k8s_gateway_api::TcpRoute) { + self.apply_tcp(route) + } + + fn delete(&mut self, namespace: String, name: String) { + let gknn = name + .gkn::() + .namespaced(namespace); + for ns_index in self.namespaces.by_ns.values_mut() { + 
ns_index.delete_tcp_route(&gknn); + } + } +} + impl kubert::index::IndexNamespacedResource for Index { fn apply(&mut self, service: Service) { let name = service.name_unchecked(); @@ -180,7 +241,8 @@ impl kubert::index::IndexNamespacedResource for Index { } match cluster_ip.parse() { Ok(addr) => { - let service_ref = ServiceRef { + let service_ref = ResourceRef { + kind: ResourceKind::Service, name: name.clone(), namespace: ns.clone(), }; @@ -193,12 +255,13 @@ impl kubert::index::IndexNamespacedResource for Index { } } - let service_info = ServiceInfo { + let service_info = ResourceInfo { opaque_ports, accrual, http_retry, grpc_retry, timeouts, + traffic_policy: None, }; self.namespaces @@ -207,90 +270,243 @@ impl kubert::index::IndexNamespacedResource for Index { .or_insert_with(|| Namespace { service_http_routes: Default::default(), service_grpc_routes: Default::default(), - service_port_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), namespace: Arc::new(ns), }) - .update_service(service.name_unchecked(), &service_info); + .update_resource( + service.name_unchecked(), + ResourceKind::Service, + &service_info, + ); - self.service_info.insert( - ServiceRef { + self.resource_info.insert( + ResourceRef { + kind: ResourceKind::Service, name: service.name_unchecked(), namespace: service.namespace().expect("Service must have Namespace"), }, service_info, ); - self.reindex_services() + self.reindex_resources(); } fn delete(&mut self, namespace: String, name: String) { tracing::debug!(name, namespace, "deleting service"); - let service_ref = ServiceRef { name, namespace }; - self.service_info.remove(&service_ref); + let service_ref = ResourceRef { + kind: ResourceKind::Service, + name, + namespace, + }; + self.resource_info.remove(&service_ref); self.services_by_ip.retain(|_, v| *v != service_ref); - self.reindex_services() + self.reindex_resources(); + } +} + +impl 
kubert::index::IndexNamespacedResource for Index { + fn apply(&mut self, egress_network: linkerd_k8s_api::EgressNetwork) { + let name = egress_network.name_unchecked(); + let ns = egress_network + .namespace() + .expect("EgressNetwork must have a namespace"); + tracing::debug!(name, ns, "indexing EgressNetwork"); + let accrual = parse_accrual_config(egress_network.annotations()) + .map_err(|error| tracing::error!(%error, service=name, namespace=ns, "failed to parse accrual config")) + .unwrap_or_default(); + let opaque_ports = ports_annotation( + egress_network.annotations(), + "config.linkerd.io/opaque-ports", + ) + .unwrap_or_else(|| self.namespaces.cluster_info.default_opaque_ports.clone()); + + let timeouts = parse_timeouts(egress_network.annotations()) + .map_err(|error| tracing::error!(%error, service=name, namespace=ns, "failed to parse timeouts")) + .unwrap_or_default(); + + let http_retry = http::parse_http_retry(egress_network.annotations()).map_err(|error| { + tracing::error!(%error, service=name, namespace=ns, "failed to parse http retry") + }).unwrap_or_default(); + let grpc_retry = grpc::parse_grpc_retry(egress_network.annotations()).map_err(|error| { + tracing::error!(%error, service=name, namespace=ns, "failed to parse grpc retry") + }).unwrap_or_default(); + + let egress_net_ref = ResourceRef { + kind: ResourceKind::EgressNetwork, + name: name.clone(), + namespace: ns.clone(), + }; + + let egress_net = + EgressNetwork::from_resource(&egress_network, self.cluster_networks.clone()); + + let traffic_policy = Some(match egress_net.traffic_policy { + linkerd_k8s_api::TrafficPolicy::Allow => TrafficPolicy::Allow, + linkerd_k8s_api::TrafficPolicy::Deny => TrafficPolicy::Deny, + }); + + self.egress_networks_by_ref + .insert(egress_net_ref.clone(), egress_net); + + let egress_network_info = ResourceInfo { + opaque_ports, + accrual, + http_retry, + grpc_retry, + timeouts, + traffic_policy, + }; + + self.namespaces + .by_ns + .entry(ns.clone()) + 
.or_insert_with(|| Namespace { + service_http_routes: Default::default(), + service_grpc_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), + namespace: Arc::new(ns), + }) + .update_resource( + egress_network.name_unchecked(), + ResourceKind::EgressNetwork, + &egress_network_info, + ); + + self.resource_info + .insert(egress_net_ref, egress_network_info); + + self.reindex_resources(); + self.reinitialize_egress_watches(); + self.reinitialize_fallback_watches() + } + + fn delete(&mut self, namespace: String, name: String) { + tracing::debug!(name, namespace, "deleting EgressNetwork"); + let egress_net_ref = ResourceRef { + kind: ResourceKind::EgressNetwork, + name, + namespace, + }; + self.egress_networks_by_ref.remove(&egress_net_ref); + + self.reindex_resources(); + self.reinitialize_egress_watches(); + self.reinitialize_fallback_watches() } } impl Index { pub fn shared(cluster_info: Arc) -> SharedIndex { + let cluster_networks = cluster_info.networks.clone(); + let (fallback_polcy_tx, _) = watch::channel(()); Arc::new(RwLock::new(Self { namespaces: NamespaceIndex { by_ns: HashMap::default(), cluster_info, }, services_by_ip: HashMap::default(), - service_info: HashMap::default(), + egress_networks_by_ref: HashMap::default(), + resource_info: HashMap::default(), + cluster_networks: cluster_networks.into_iter().map(Cidr::from).collect(), + fallback_polcy_tx, })) } + pub fn is_address_in_cluster(&self, addr: IpAddr) -> bool { + self.cluster_networks + .iter() + .any(|net| net.contains(&addr.into())) + } + + pub fn fallback_policy_rx(&self) -> watch::Receiver<()> { + self.fallback_polcy_tx.subscribe() + } + + fn reinitialize_fallback_watches(&mut self) { + let (new_fallback_tx, _) = watch::channel(()); + self.fallback_polcy_tx = new_fallback_tx; + } + pub fn outbound_policy_rx( &mut self, - service_name: String, - service_namespace: String, - service_port: 
NonZeroU16, - source_namespace: String, + target: ResourceTarget, ) -> Result> { + let ResourceTarget { + name, + namespace, + port, + source_namespace, + kind, + } = target; + + let kind = match kind { + Kind::EgressNetwork { .. } => ResourceKind::EgressNetwork, + Kind::Service { .. } => ResourceKind::Service, + }; + let ns = self .namespaces .by_ns - .entry(service_namespace.clone()) + .entry(namespace.clone()) .or_insert_with(|| Namespace { - namespace: Arc::new(service_namespace.to_string()), + namespace: Arc::new(namespace.to_string()), service_http_routes: Default::default(), service_grpc_routes: Default::default(), - service_port_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), }); - let key = ServicePort { - service: service_name, - port: service_port, - }; + let key = ResourcePort { kind, name, port }; - tracing::debug!(?key, "subscribing to service port"); + tracing::debug!(?key, "subscribing to resource port"); let routes = - ns.service_routes_or_default(key, &self.namespaces.cluster_info, &self.service_info); + ns.resource_routes_or_default(key, &self.namespaces.cluster_info, &self.resource_info); let watch = routes.watch_for_ns_or_default(source_namespace); Ok(watch.watch.subscribe()) } - pub fn lookup_service(&self, addr: IpAddr) -> Option { - self.services_by_ip.get(&addr).cloned() + pub fn lookup_service(&self, addr: IpAddr) -> Option<(String, String)> { + self.services_by_ip + .get(&addr) + .cloned() + .map(|r| (r.namespace, r.name)) + } + + pub fn lookup_egress_network( + &self, + addr: IpAddr, + source_namespace: String, + ) -> Option<(String, String)> { + egress_network::resolve_egress_network( + addr, + source_namespace, + self.egress_networks_by_ref.values(), + ) + .map(|r| (r.namespace, r.name)) } fn apply_http(&mut self, route: HttpRouteResource) { tracing::debug!(name = route.name(), "indexing httproute"); for parent_ref in 
route.inner().parent_refs.iter().flatten() { - if !is_parent_service(parent_ref) { + let parent_kind = if is_parent_service(parent_ref) { + ResourceKind::Service + } else if is_parent_egress_network(parent_ref) { + ResourceKind::EgressNetwork + } else { continue; - } + }; - if !route_accepted_by_service(route.status(), &parent_ref.name) { + if !route_accepted_by_parent(route.status(), &parent_ref.name) { continue; } @@ -306,13 +522,16 @@ impl Index { namespace: Arc::new(ns), service_http_routes: Default::default(), service_grpc_routes: Default::default(), - service_port_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), }) .apply_http_route( route.clone(), parent_ref, + parent_kind, &self.namespaces.cluster_info, - &self.service_info, + &self.resource_info, ); } } @@ -321,11 +540,15 @@ impl Index { tracing::debug!(name = route.name_unchecked(), "indexing grpcroute"); for parent_ref in route.spec.inner.parent_refs.iter().flatten() { - if !is_parent_service(parent_ref) { + let parent_kind = if is_parent_service(parent_ref) { + ResourceKind::Service + } else if is_parent_egress_network(parent_ref) { + ResourceKind::EgressNetwork + } else { continue; - } + }; - if !route_accepted_by_service(route.status.as_ref().map(|s| &s.inner), &parent_ref.name) + if !route_accepted_by_parent(route.status.as_ref().map(|s| &s.inner), &parent_ref.name) { continue; } @@ -342,20 +565,115 @@ impl Index { namespace: Arc::new(ns), service_http_routes: Default::default(), service_grpc_routes: Default::default(), - service_port_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), }) .apply_grpc_route( route.clone(), parent_ref, + parent_kind, + &self.namespaces.cluster_info, + &self.resource_info, + ); + } + } + + fn apply_tls(&mut self, route: k8s_gateway_api::TlsRoute) { + 
tracing::debug!(name = route.name_unchecked(), "indexing tlsroute"); + + for parent_ref in route.spec.inner.parent_refs.iter().flatten() { + let parent_kind = if is_parent_service(parent_ref) { + ResourceKind::Service + } else if is_parent_egress_network(parent_ref) { + ResourceKind::EgressNetwork + } else { + continue; + }; + + if !route_accepted_by_parent(route.status.as_ref().map(|s| &s.inner), &parent_ref.name) + { + continue; + } + + let ns = parent_ref + .namespace + .clone() + .unwrap_or_else(|| route.namespace().expect("TlsRoute must have a namespace")); + + self.namespaces + .by_ns + .entry(ns.clone()) + .or_insert_with(|| Namespace { + namespace: Arc::new(ns), + service_http_routes: Default::default(), + service_grpc_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), + }) + .apply_tls_route( + route.clone(), + parent_ref, + parent_kind, &self.namespaces.cluster_info, - &self.service_info, + &self.resource_info, ); } } - fn reindex_services(&mut self) { + fn apply_tcp(&mut self, route: k8s_gateway_api::TcpRoute) { + tracing::debug!(name = route.name_unchecked(), "indexing tcproute"); + + for parent_ref in route.spec.inner.parent_refs.iter().flatten() { + let parent_kind = if is_parent_service(parent_ref) { + ResourceKind::Service + } else if is_parent_egress_network(parent_ref) { + ResourceKind::EgressNetwork + } else { + continue; + }; + + if !route_accepted_by_parent(route.status.as_ref().map(|s| &s.inner), &parent_ref.name) + { + continue; + } + + let ns = parent_ref + .namespace + .clone() + .unwrap_or_else(|| route.namespace().expect("TcpRoute must have a namespace")); + + self.namespaces + .by_ns + .entry(ns.clone()) + .or_insert_with(|| Namespace { + namespace: Arc::new(ns), + service_http_routes: Default::default(), + service_grpc_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + 
resource_port_routes: Default::default(), + }) + .apply_tcp_route( + route.clone(), + parent_ref, + parent_kind, + &self.namespaces.cluster_info, + &self.resource_info, + ); + } + } + + fn reindex_resources(&mut self) { + for ns in self.namespaces.by_ns.values_mut() { + ns.reindex_resources(&self.resource_info); + } + } + + fn reinitialize_egress_watches(&mut self) { for ns in self.namespaces.by_ns.values_mut() { - ns.reindex_services(&self.service_info); + ns.reinitialize_egress_watches(); } } } @@ -365,53 +683,59 @@ impl Namespace { &mut self, route: HttpRouteResource, parent_ref: &ParentReference, + parent_kind: ResourceKind, cluster_info: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, ) { tracing::debug!(?route); - let outbound_route = - match http::convert_route(&self.namespace, route.clone(), cluster_info, service_info) { - Ok(route) => route, - Err(error) => { - tracing::error!(%error, "failed to convert route"); - return; - } - }; + let outbound_route = match http::convert_route( + &self.namespace, + route.clone(), + cluster_info, + resource_info, + ) { + Ok(route) => route, + Err(error) => { + tracing::error!(%error, "failed to convert route"); + return; + } + }; tracing::debug!(?outbound_route); let port = parent_ref.port.and_then(NonZeroU16::new); if let Some(port) = port { - let service_port = ServicePort { + let resource_port = ResourcePort { + kind: parent_kind, port, - service: parent_ref.name.clone(), + name: parent_ref.name.clone(), }; tracing::debug!( - ?service_port, + ?resource_port, route = route.name(), - "inserting httproute for service" + "inserting httproute for resource" ); let service_routes = - self.service_routes_or_default(service_port, cluster_info, service_info); + self.resource_routes_or_default(resource_port, cluster_info, resource_info); service_routes.apply_http_route(route.gknn(), outbound_route); } else { // If the parent_ref doesn't include a port, apply this route - // to all ServiceRoutes which match the 
Service name. - self.service_port_routes.iter_mut().for_each( - |(ServicePort { service, port: _ }, routes)| { - if service == &parent_ref.name { + // to all ResourceRoutes which match the resource name. + self.resource_port_routes.iter_mut().for_each( + |(ResourcePort { name, port: _, .. }, routes)| { + if name == &parent_ref.name { routes.apply_http_route(route.gknn(), outbound_route.clone()); } }, ); // Also add the route to the list of routes that target the - // Service without specifying a port. + // resource without specifying a port. self.service_http_routes .entry(parent_ref.name.clone()) .or_default() @@ -423,13 +747,79 @@ impl Namespace { &mut self, route: k8s_gateway_api::GrpcRoute, parent_ref: &ParentReference, + parent_kind: ResourceKind, cluster_info: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, ) { tracing::debug!(?route); + let outbound_route = match grpc::convert_route( + &self.namespace, + route.clone(), + cluster_info, + resource_info, + ) { + Ok(route) => route, + Err(error) => { + tracing::error!(%error, "failed to convert route"); + return; + } + }; + + tracing::debug!(?outbound_route); + + let gknn = route + .gkn() + .namespaced(route.namespace().expect("Route must have namespace")); + + let port = parent_ref.port.and_then(NonZeroU16::new); + + if let Some(port) = port { + let port = ResourcePort { + kind: parent_kind, + port, + name: parent_ref.name.clone(), + }; + + tracing::debug!( + ?port, + route = route.name_unchecked(), + "inserting grpcroute for resource" + ); + + let service_routes = self.resource_routes_or_default(port, cluster_info, resource_info); + + service_routes.apply_grpc_route(gknn, outbound_route); + } else { + // If the parent_ref doesn't include a port, apply this route + // to all ResourceRoutes which match the resource name. + self.resource_port_routes.iter_mut().for_each( + |(ResourcePort { name, port: _, .. 
}, routes)| { + if name == &parent_ref.name { + routes.apply_grpc_route(gknn.clone(), outbound_route.clone()); + } + }, + ); + // Also add the route to the list of routes that target the + // resource without specifying a port. + self.service_grpc_routes + .entry(parent_ref.name.clone()) + .or_default() + .insert(gknn, outbound_route); + } + } + + fn apply_tls_route( + &mut self, + route: k8s_gateway_api::TlsRoute, + parent_ref: &ParentReference, + parent_kind: ResourceKind, + cluster_info: &ClusterInfo, + resource_info: &HashMap, + ) { + tracing::debug!(?route); let outbound_route = - match grpc::convert_route(&self.namespace, route.clone(), cluster_info, service_info) { + match tls::convert_route(&self.namespace, route.clone(), cluster_info, resource_info) { Ok(route) => route, Err(error) => { tracing::error!(%error, "failed to convert route"); @@ -446,53 +836,130 @@ impl Namespace { let port = parent_ref.port.and_then(NonZeroU16::new); if let Some(port) = port { - let service_port = ServicePort { + let port = ResourcePort { + kind: parent_kind, port, - service: parent_ref.name.clone(), + name: parent_ref.name.clone(), }; tracing::debug!( - ?service_port, + ?port, route = route.name_unchecked(), - "inserting grpcroute for service" + "inserting tlsroute for resource" ); - let service_routes = - self.service_routes_or_default(service_port, cluster_info, service_info); + let resource_routes = + self.resource_routes_or_default(port, cluster_info, resource_info); - service_routes.apply_grpc_route(gknn, outbound_route); + resource_routes.apply_tls_route(gknn, outbound_route); } else { // If the parent_ref doesn't include a port, apply this route - // to all ServiceRoutes which match the Service name. - self.service_port_routes.iter_mut().for_each( - |(ServicePort { service, port: _ }, routes)| { - if service == &parent_ref.name { - routes.apply_grpc_route(gknn.clone(), outbound_route.clone()); + // to all ResourceRoutes which match the resource name. 
+ self.resource_port_routes.iter_mut().for_each( + |(ResourcePort { name, port: _, .. }, routes)| { + if name == &parent_ref.name { + routes.apply_tls_route(gknn.clone(), outbound_route.clone()); } }, ); // Also add the route to the list of routes that target the - // Service without specifying a port. - self.service_grpc_routes + // resource without specifying a port. + self.service_tls_routes .entry(parent_ref.name.clone()) .or_default() .insert(gknn, outbound_route); } } - fn reindex_services(&mut self, service_info: &HashMap) { - let update_service = |backend: &mut Backend| { - if let Backend::Service(svc) = backend { - let service_ref = ServiceRef { - name: svc.name.clone(), - namespace: svc.namespace.clone(), - }; - svc.exists = service_info.contains_key(&service_ref); - } + fn apply_tcp_route( + &mut self, + route: k8s_gateway_api::TcpRoute, + parent_ref: &ParentReference, + parent_kind: ResourceKind, + cluster_info: &ClusterInfo, + resource_info: &HashMap, + ) { + tracing::debug!(?route); + let outbound_route = + match tcp::convert_route(&self.namespace, route.clone(), cluster_info, resource_info) { + Ok(route) => route, + Err(error) => { + tracing::error!(%error, "failed to convert route"); + return; + } + }; + + tracing::debug!(?outbound_route); + + let gknn = route + .gkn() + .namespaced(route.namespace().expect("Route must have namespace")); + + let port = parent_ref.port.and_then(NonZeroU16::new); + + if let Some(port) = port { + let port = ResourcePort { + kind: parent_kind, + port, + name: parent_ref.name.clone(), + }; + + tracing::debug!( + ?port, + route = route.name_unchecked(), + "inserting tcproute for resource" + ); + + let resource_routes = + self.resource_routes_or_default(port, cluster_info, resource_info); + + resource_routes.apply_tcp_route(gknn, outbound_route); + } else { + // If the parent_ref doesn't include a port, apply this route + // to all ResourceRoutes which match the resource name. 
+ self.resource_port_routes.iter_mut().for_each( + |(ResourcePort { name, port: _, .. }, routes)| { + if name == &parent_ref.name { + routes.apply_tcp_route(gknn.clone(), outbound_route.clone()); + } + }, + ); + + // Also add the route to the list of routes that target the + // resource without specifying a port. + self.service_tcp_routes + .entry(parent_ref.name.clone()) + .or_default() + .insert(gknn, outbound_route); + } + } + + fn reindex_resources(&mut self, resource_info: &HashMap) { + let update_backend = |backend: &mut Backend| { + match backend { + Backend::Service(svc) => { + let service_ref = ResourceRef { + kind: ResourceKind::Service, + name: svc.name.clone(), + namespace: svc.namespace.clone(), + }; + svc.exists = resource_info.contains_key(&service_ref); + } + Backend::EgressNetwork(egress_net) => { + let egress_net_ref = ResourceRef { + kind: ResourceKind::EgressNetwork, + name: egress_net.name.clone(), + namespace: egress_net.namespace.clone(), + }; + egress_net.exists = resource_info.contains_key(&egress_net_ref); + } + + _ => {} + }; }; - for routes in self.service_port_routes.values_mut() { + for routes in self.resource_port_routes.values_mut() { for watch in routes.watches_by_ns.values_mut() { let http_backends = watch .http_routes @@ -504,36 +971,58 @@ impl Namespace { .values_mut() .flat_map(|route| route.rules.iter_mut()) .flat_map(|rule| rule.backends.iter_mut()); + let tls_backends = watch + .tls_routes + .values_mut() + .flat_map(|route| route.rule.backends.iter_mut()); + let tcp_backends = watch + .tcp_routes + .values_mut() + .flat_map(|route| route.rule.backends.iter_mut()); + + http_backends + .chain(grpc_backends) + .chain(tls_backends) + .chain(tcp_backends) + .for_each(update_backend); - http_backends.chain(grpc_backends).for_each(update_service); watch.send_if_modified(); } } } - fn update_service(&mut self, name: String, service: &ServiceInfo) { - tracing::debug!(?name, ?service, "updating service"); + fn 
reinitialize_egress_watches(&mut self) { + for routes in self.resource_port_routes.values_mut() { + if let ParentInfo::EgressNetwork { .. } = routes.parent_info { + routes.reinitialize_watches(); + } + } + } - for (svc_port, svc_routes) in self.service_port_routes.iter_mut() { - if svc_port.service != name { + fn update_resource(&mut self, name: String, kind: ResourceKind, resource: &ResourceInfo) { + tracing::debug!(?name, ?resource, "updating resource"); + + for (resource_port, resource_routes) in self.resource_port_routes.iter_mut() { + if resource_port.name != name || kind != resource_port.kind { continue; } - let opaque = service.opaque_ports.contains(&svc_port.port); + let opaque = resource.opaque_ports.contains(&resource_port.port); - svc_routes.update_service( + resource_routes.update_resource( opaque, - service.accrual, - service.http_retry.clone(), - service.grpc_retry.clone(), - service.timeouts.clone(), + resource.accrual, + resource.http_retry.clone(), + resource.grpc_retry.clone(), + resource.timeouts.clone(), + resource.traffic_policy, ); } } fn delete_http_route(&mut self, gknn: &GroupKindNamespaceName) { - for service in self.service_port_routes.values_mut() { - service.delete_http_route(gknn); + for resource in self.resource_port_routes.values_mut() { + resource.delete_http_route(gknn); } self.service_http_routes.retain(|_, routes| { @@ -543,8 +1032,8 @@ impl Namespace { } fn delete_grpc_route(&mut self, gknn: &GroupKindNamespaceName) { - for service in self.service_port_routes.values_mut() { - service.delete_grpc_route(gknn); + for resource in self.resource_port_routes.values_mut() { + resource.delete_grpc_route(gknn); } self.service_grpc_routes.retain(|_, routes| { @@ -553,58 +1042,111 @@ impl Namespace { }); } - fn service_routes_or_default( + fn delete_tls_route(&mut self, gknn: &GroupKindNamespaceName) { + for resource in self.resource_port_routes.values_mut() { + resource.delete_tls_route(gknn); + } + + self.service_tls_routes.retain(|_, 
routes| { + routes.remove(gknn); + !routes.is_empty() + }); + } + + fn delete_tcp_route(&mut self, gknn: &GroupKindNamespaceName) { + for resource in self.resource_port_routes.values_mut() { + resource.delete_tcp_route(gknn); + } + + self.service_tcp_routes.retain(|_, routes| { + routes.remove(gknn); + !routes.is_empty() + }); + } + + fn resource_routes_or_default( &mut self, - sp: ServicePort, + rp: ResourcePort, cluster: &ClusterInfo, - service_info: &HashMap, - ) -> &mut ServiceRoutes { - self.service_port_routes - .entry(sp.clone()) + resource_info: &HashMap, + ) -> &mut ResourceRoutes { + self.resource_port_routes + .entry(rp.clone()) .or_insert_with(|| { - let authority = - cluster.service_dns_authority(&self.namespace, &sp.service, sp.port); - - let service_ref = ServiceRef { - name: sp.service.clone(), + let resource_ref = ResourceRef { + name: rp.name.clone(), namespace: self.namespace.to_string(), + kind: rp.kind.clone(), }; + let mut parent_info = match rp.kind { + ResourceKind::EgressNetwork => ParentInfo::EgressNetwork { + traffic_policy: TrafficPolicy::Deny, + name: resource_ref.name.clone(), + namespace: resource_ref.namespace.clone(), + }, + ResourceKind::Service => { + let authority = + cluster.service_dns_authority(&self.namespace, &rp.name, rp.port); + ParentInfo::Service { + authority, + name: resource_ref.name.clone(), + namespace: resource_ref.namespace.clone(), + } + } + }; let mut opaque = false; let mut accrual = None; let mut http_retry = None; let mut grpc_retry = None; let mut timeouts = Default::default(); - if let Some(svc) = service_info.get(&service_ref) { - opaque = svc.opaque_ports.contains(&sp.port); - accrual = svc.accrual; - http_retry = svc.http_retry.clone(); - grpc_retry = svc.grpc_retry.clone(); - timeouts = svc.timeouts.clone(); + if let Some(resource) = resource_info.get(&resource_ref) { + opaque = resource.opaque_ports.contains(&rp.port); + accrual = resource.accrual; + http_retry = resource.http_retry.clone(); + 
grpc_retry = resource.grpc_retry.clone(); + timeouts = resource.timeouts.clone(); + + if let Some(traffic_policy) = resource.traffic_policy { + parent_info = ParentInfo::EgressNetwork { + traffic_policy, + name: resource_ref.name, + namespace: resource_ref.namespace, + } + } } - // The routes which target this Service but don't specify + // The routes which target this Resource but don't specify // a port apply to all ports. Therefore, we include them. let http_routes = self .service_http_routes - .get(&sp.service) + .get(&rp.name) .cloned() .unwrap_or_default(); let grpc_routes = self .service_grpc_routes - .get(&sp.service) + .get(&rp.name) + .cloned() + .unwrap_or_default(); + let tls_routes = self + .service_tls_routes + .get(&rp.name) + .cloned() + .unwrap_or_default(); + let tcp_routes = self + .service_tcp_routes + .get(&rp.name) .cloned() .unwrap_or_default(); - let mut service_routes = ServiceRoutes { + let mut resource_routes = ResourceRoutes { + parent_info, opaque, accrual, http_retry, grpc_retry, timeouts, - authority, - port: sp.port, - name: sp.service, + port: rp.port, namespace: self.namespace.clone(), watches_by_ns: Default::default(), }; @@ -618,34 +1160,57 @@ impl Namespace { let (producer_grpc_routes, consumer_grpc_routes): (Vec<_>, Vec<_>) = grpc_routes .into_iter() .partition(|(gknn, _)| gknn.namespace == *self.namespace); + let (producer_tls_routes, consumer_tls_routes): (Vec<_>, Vec<_>) = tls_routes + .into_iter() + .partition(|(gknn, _)| gknn.namespace == *self.namespace); + let (producer_tcp_routes, consumer_tcp_routes): (Vec<_>, Vec<_>) = tcp_routes + .into_iter() + .partition(|(gknn, _)| gknn.namespace == *self.namespace); for (consumer_gknn, consumer_route) in consumer_http_routes { // Consumer routes should only apply to watches from the // consumer namespace. 
- let consumer_watch = - service_routes.watch_for_ns_or_default(consumer_gknn.namespace.to_string()); + let consumer_watch = resource_routes + .watch_for_ns_or_default(consumer_gknn.namespace.to_string()); consumer_watch.insert_http_route(consumer_gknn.clone(), consumer_route.clone()); } for (consumer_gknn, consumer_route) in consumer_grpc_routes { // Consumer routes should only apply to watches from the // consumer namespace. - let consumer_watch = - service_routes.watch_for_ns_or_default(consumer_gknn.namespace.to_string()); + let consumer_watch = resource_routes + .watch_for_ns_or_default(consumer_gknn.namespace.to_string()); consumer_watch.insert_grpc_route(consumer_gknn.clone(), consumer_route.clone()); } + for (consumer_gknn, consumer_route) in consumer_tls_routes { + // Consumer routes should only apply to watches from the + // consumer namespace. + let consumer_watch = resource_routes + .watch_for_ns_or_default(consumer_gknn.namespace.to_string()); + + consumer_watch.insert_tls_route(consumer_gknn.clone(), consumer_route.clone()); + } + + for (consumer_gknn, consumer_route) in consumer_tcp_routes { + // Consumer routes should only apply to watches from the + // consumer namespace. + let consumer_watch = resource_routes + .watch_for_ns_or_default(consumer_gknn.namespace.to_string()); + + consumer_watch.insert_tcp_route(consumer_gknn.clone(), consumer_route.clone()); + } for (producer_gknn, producer_route) in producer_http_routes { // Insert the route into the producer namespace. - let producer_watch = - service_routes.watch_for_ns_or_default(producer_gknn.namespace.to_string()); + let producer_watch = resource_routes + .watch_for_ns_or_default(producer_gknn.namespace.to_string()); producer_watch.insert_http_route(producer_gknn.clone(), producer_route.clone()); // Producer routes apply to clients in all namespaces, so // apply it to watches for all other namespaces too. 
- service_routes + resource_routes .watches_by_ns .iter_mut() .filter(|(namespace, _)| { @@ -658,14 +1223,14 @@ impl Namespace { for (producer_gknn, producer_route) in producer_grpc_routes { // Insert the route into the producer namespace. - let producer_watch = - service_routes.watch_for_ns_or_default(producer_gknn.namespace.to_string()); + let producer_watch = resource_routes + .watch_for_ns_or_default(producer_gknn.namespace.to_string()); producer_watch.insert_grpc_route(producer_gknn.clone(), producer_route.clone()); // Producer routes apply to clients in all namespaces, so // apply it to watches for all other namespaces too. - service_routes + resource_routes .watches_by_ns .iter_mut() .filter(|(namespace, _)| { @@ -676,7 +1241,47 @@ impl Namespace { }); } - service_routes + for (producer_gknn, producer_route) in producer_tls_routes { + // Insert the route into the producer namespace. + let producer_watch = resource_routes + .watch_for_ns_or_default(producer_gknn.namespace.to_string()); + + producer_watch.insert_tls_route(producer_gknn.clone(), producer_route.clone()); + + // Producer routes apply to clients in all namespaces, so + // apply it to watches for all other namespaces too. + resource_routes + .watches_by_ns + .iter_mut() + .filter(|(namespace, _)| { + namespace.as_str() != producer_gknn.namespace.as_ref() + }) + .for_each(|(_, watch)| { + watch.insert_tls_route(producer_gknn.clone(), producer_route.clone()) + }); + } + + for (producer_gknn, producer_route) in producer_tcp_routes { + // Insert the route into the producer namespace. + let producer_watch = resource_routes + .watch_for_ns_or_default(producer_gknn.namespace.to_string()); + + producer_watch.insert_tcp_route(producer_gknn.clone(), producer_route.clone()); + + // Producer routes apply to clients in all namespaces, so + // apply it to watches for all other namespaces too. 
+ resource_routes + .watches_by_ns + .iter_mut() + .filter(|(namespace, _)| { + namespace.as_str() != producer_gknn.namespace.as_ref() + }) + .for_each(|(_, watch)| { + watch.insert_tcp_route(producer_gknn.clone(), producer_route.clone()) + }); + } + + resource_routes }) } } @@ -725,7 +1330,7 @@ pub fn is_parent_service_or_egress_network(parent: &ParentReference) -> bool { } #[inline] -fn route_accepted_by_service( +fn route_accepted_by_parent( route_status: Option<&k8s_gateway_api::RouteStatus>, service: &str, ) -> bool { @@ -743,7 +1348,13 @@ fn route_accepted_by_service( }) } -impl ServiceRoutes { +impl ResourceRoutes { + fn reinitialize_watches(&mut self) { + for watch in self.watches_by_ns.values_mut() { + watch.reinitialize_watch(); + } + } + fn watch_for_ns_or_default(&mut self, namespace: String) -> &mut RoutesWatch { // The routes from the producer namespace apply to watches in all // namespaces, so we copy them. @@ -758,8 +1369,21 @@ impl ServiceRoutes { .map(|watch| watch.grpc_routes.clone()) .unwrap_or_default(); + let tls_routes = self + .watches_by_ns + .get(self.namespace.as_ref()) + .map(|watch| watch.tls_routes.clone()) + .unwrap_or_default(); + + let tcp_routes = self + .watches_by_ns + .get(self.namespace.as_ref()) + .map(|watch| watch.tcp_routes.clone()) + .unwrap_or_default(); + self.watches_by_ns.entry(namespace).or_insert_with(|| { let (sender, _) = watch::channel(OutboundPolicy { + parent_info: self.parent_info.clone(), port: self.port, opaque: self.opaque, accrual: self.accrual, @@ -768,14 +1392,16 @@ impl ServiceRoutes { timeouts: self.timeouts.clone(), http_routes: http_routes.clone(), grpc_routes: grpc_routes.clone(), - name: self.name.to_string(), - authority: self.authority.clone(), - namespace: self.namespace.to_string(), + tls_routes: tls_routes.clone(), + tcp_routes: tcp_routes.clone(), }); RoutesWatch { + parent_info: self.parent_info.clone(), http_routes, grpc_routes, + tls_routes, + tcp_routes, watch: sender, opaque: self.opaque, 
accrual: self.accrual, @@ -830,29 +1456,86 @@ impl ServiceRoutes { } } - fn update_service( + fn apply_tls_route(&mut self, gknn: GroupKindNamespaceName, route: TlsRoute) { + if *gknn.namespace == *self.namespace { + // This is a producer namespace route. + let watch = self.watch_for_ns_or_default(gknn.namespace.to_string()); + + watch.insert_tls_route(gknn.clone(), route.clone()); + + // Producer routes apply to clients in all namespaces, so + // apply it to watches for all other namespaces too. + for (ns, ns_watch) in self.watches_by_ns.iter_mut() { + if ns != &gknn.namespace { + ns_watch.insert_tls_route(gknn.clone(), route.clone()); + } + } + } else { + // This is a consumer namespace route and should only apply to + // watches from that namespace. + let watch = self.watch_for_ns_or_default(gknn.namespace.to_string()); + watch.insert_tls_route(gknn, route); + } + } + + fn apply_tcp_route(&mut self, gknn: GroupKindNamespaceName, route: TcpRoute) { + if *gknn.namespace == *self.namespace { + // This is a producer namespace route. + let watch = self.watch_for_ns_or_default(gknn.namespace.to_string()); + + watch.insert_tcp_route(gknn.clone(), route.clone()); + + // Producer routes apply to clients in all namespaces, so + // apply it to watches for all other namespaces too. + for (ns, ns_watch) in self.watches_by_ns.iter_mut() { + if ns != &gknn.namespace { + ns_watch.insert_tcp_route(gknn.clone(), route.clone()); + } + } + } else { + // This is a consumer namespace route and should only apply to + // watches from that namespace. 
+ let watch = self.watch_for_ns_or_default(gknn.namespace.to_string()); + watch.insert_tcp_route(gknn, route); + } + } + + fn update_resource( &mut self, opaque: bool, accrual: Option, http_retry: Option>, grpc_retry: Option>, timeouts: RouteTimeouts, + traffic_policy: Option, ) { self.opaque = opaque; self.accrual = accrual; self.http_retry = http_retry.clone(); self.grpc_retry = grpc_retry.clone(); self.timeouts = timeouts.clone(); + self.update_traffic_policy(traffic_policy); for watch in self.watches_by_ns.values_mut() { watch.opaque = opaque; watch.accrual = accrual; watch.http_retry = http_retry.clone(); watch.grpc_retry = grpc_retry.clone(); watch.timeouts = timeouts.clone(); + watch.update_traffic_policy(traffic_policy); watch.send_if_modified(); } } + fn update_traffic_policy(&mut self, traffic_policy: Option) { + if let (ParentInfo::EgressNetwork { traffic_policy, .. }, Some(new)) = + (&mut self.parent_info, traffic_policy) + { + if *traffic_policy != new { + *traffic_policy = new; + } + } + } + fn delete_http_route(&mut self, gknn: &GroupKindNamespaceName) { for watch in self.watches_by_ns.values_mut() { watch.remove_http_route(gknn); @@ -864,13 +1547,46 @@ impl ServiceRoutes { watch.remove_grpc_route(gknn); } } + + fn delete_tls_route(&mut self, gknn: &GroupKindNamespaceName) { + for watch in self.watches_by_ns.values_mut() { + watch.remove_tls_route(gknn); + } + } + + fn delete_tcp_route(&mut self, gknn: &GroupKindNamespaceName) { + for watch in self.watches_by_ns.values_mut() { + watch.remove_tcp_route(gknn); + } + } } impl RoutesWatch { + fn reinitialize_watch(&mut self) { + let current_policy = self.watch.borrow().clone(); + let (new_sender, _) = watch::channel(current_policy); + self.watch = new_sender; + } + + fn update_traffic_policy(&mut self, traffic_policy: Option) { + if let (ParentInfo::EgressNetwork { traffic_policy, .. 
}, Some(new)) = + (&mut self.parent_info, traffic_policy) + { + if *traffic_policy != new { + *traffic_policy = new; + } + } + } + fn send_if_modified(&mut self) { self.watch.send_if_modified(|policy| { let mut modified = false; + if self.parent_info != policy.parent_info { + policy.parent_info = self.parent_info.clone(); + modified = true; + } + if self.http_routes != policy.http_routes { policy.http_routes = self.http_routes.clone(); modified = true; @@ -881,6 +1597,16 @@ impl RoutesWatch { modified = true; } + if self.tls_routes != policy.tls_routes { + policy.tls_routes = self.tls_routes.clone(); + modified = true; + } + + if self.tcp_routes != policy.tcp_routes { + policy.tcp_routes = self.tcp_routes.clone(); + modified = true; + } + if self.opaque != policy.opaque { policy.opaque = self.opaque; modified = true; @@ -922,6 +1648,18 @@ impl RoutesWatch { self.send_if_modified(); } + fn insert_tls_route(&mut self, gknn: GroupKindNamespaceName, route: TlsRoute) { + self.tls_routes.insert(gknn, route); + + self.send_if_modified(); + } + + fn insert_tcp_route(&mut self, gknn: GroupKindNamespaceName, route: TcpRoute) { + self.tcp_routes.insert(gknn, route); + + self.send_if_modified(); + } + fn remove_http_route(&mut self, gknn: &GroupKindNamespaceName) { self.http_routes.remove(gknn); self.send_if_modified(); @@ -931,6 +1669,16 @@ impl RoutesWatch { self.grpc_routes.remove(gknn); self.send_if_modified(); } + + fn remove_tls_route(&mut self, gknn: &GroupKindNamespaceName) { + self.tls_routes.remove(gknn); + self.send_if_modified(); + } + + fn remove_tcp_route(&mut self, gknn: &GroupKindNamespaceName) { + self.tcp_routes.remove(gknn); + self.send_if_modified(); + } } pub fn parse_accrual_config( @@ -1036,3 +1784,19 @@ fn parse_duration(s: &str) -> Result { .ok_or_else(|| anyhow::anyhow!("Timeout value {} overflows when converted to 'ms'", s))?; Ok(time::Duration::from_millis(ms)) } + +#[inline] +pub(crate) fn backend_kind( + backend: 
&k8s_gateway_api::BackendObjectReference, +) -> Option { + let group = backend.group.as_deref(); + // Backends default to `Service` if no kind is specified. + let kind = backend.kind.as_deref().unwrap_or("Service"); + if is_service(group, kind) { + Some(ResourceKind::Service) + } else if is_egress_network(group, kind) { + Some(ResourceKind::EgressNetwork) + } else { + None + } +} diff --git a/policy-controller/k8s/index/src/outbound/index/egress_network.rs b/policy-controller/k8s/index/src/outbound/index/egress_network.rs new file mode 100644 index 0000000000000..ef961e16e1b0b --- /dev/null +++ b/policy-controller/k8s/index/src/outbound/index/egress_network.rs @@ -0,0 +1,275 @@ +use chrono::{offset::Utc, DateTime}; +use linkerd_policy_controller_k8s_api::policy::{Cidr, Network, TrafficPolicy}; +use linkerd_policy_controller_k8s_api::{policy as linkerd_k8s_api, ResourceExt}; +use std::net::IpAddr; + +#[derive(Debug)] +pub(crate) struct EgressNetwork { + pub networks: Vec, + pub name: String, + pub namespace: String, + pub creation_timestamp: Option>, + pub traffic_policy: TrafficPolicy, +} + +#[derive(Debug, PartialEq, Eq)] +struct MatchedEgressNetwork { + matched_network_size: usize, + name: String, + namespace: String, + creation_timestamp: Option>, + pub traffic_policy: TrafficPolicy, +} + +// === impl EgressNetwork === + +impl EgressNetwork { + pub(crate) fn from_resource( + r: &linkerd_k8s_api::EgressNetwork, + cluster_networks: Vec, + ) -> Self { + let name = r.name_unchecked(); + let namespace = r.namespace().expect("EgressNetwork must have a namespace"); + let creation_timestamp = r.creation_timestamp().map(|d| d.0); + let traffic_policy = r.spec.traffic_policy.clone(); + + let networks = r.spec.networks.clone().unwrap_or_else(|| { + let (v6, v4) = cluster_networks.iter().cloned().partition(Cidr::is_ipv6); + + vec![ + Network { + cidr: "0.0.0.0/0".parse().expect("should parse"), + except: Some(v4), + }, + Network { + cidr: "::/0".parse().expect("should 
parse"), + except: Some(v6), + }, + ] + }); + + EgressNetwork { + name, + namespace, + networks, + creation_timestamp, + traffic_policy, + } + } +} + +// Attempts to find the best matching network for a certain discovery look-up. +// Logic is: +// 1. if there are Egress networks in the source_namespace, only these are considered +// 2. the target IP is matched against the networks of the EgressNetwork +// 3. ambiguity is resolved as by comparing the networks using compare_matched_egress_network +pub(crate) fn resolve_egress_network<'n>( + addr: IpAddr, + source_namespace: String, + nets: impl Iterator, +) -> Option { + let (same_ns, rest): (Vec<_>, Vec<_>) = nets.partition(|un| un.namespace == source_namespace); + let to_pick_from = if !same_ns.is_empty() { same_ns } else { rest }; + + to_pick_from + .iter() + .filter_map(|egress_network| { + let matched_network_size = match_network(&egress_network.networks, addr)?; + Some(MatchedEgressNetwork { + name: egress_network.name.clone(), + namespace: egress_network.namespace.clone(), + matched_network_size, + creation_timestamp: egress_network.creation_timestamp, + traffic_policy: egress_network.traffic_policy.clone(), + }) + }) + .max_by(compare_matched_egress_network) + .map(|m| super::ResourceRef { + kind: super::ResourceKind::EgressNetwork, + name: m.name, + namespace: m.namespace, + }) +} + +// Finds a CIDR that contains the given IpAddr. When there are +// multiple CIDRS that match this criteria, the CIDR that is most +// specific (as in having the smallest address space) wins. +fn match_network(networks: &[Network], addr: IpAddr) -> Option { + networks + .iter() + .filter(|c| c.contains(addr)) + .min_by(|a, b| a.block_size().cmp(&b.block_size())) + .map(Network::block_size) +} + +// This logic compares two MatchedEgressNetwork objects with the purpose +// of picking the one that is more specific. The disambiguation rules are +// as follows: +// 1. 
prefer the more specific network match (smaller address space size) +// 2. prefer older resource +// 3. all being equal, rely on alphabetical sort of namespace/name +fn compare_matched_egress_network( + a: &MatchedEgressNetwork, + b: &MatchedEgressNetwork, +) -> std::cmp::Ordering { + b.matched_network_size + .cmp(&a.matched_network_size) + .then_with(|| a.creation_timestamp.cmp(&b.creation_timestamp).reverse()) + .then_with(|| a.namespace.cmp(&b.namespace).reverse()) + .then_with(|| a.name.cmp(&b.name).reverse()) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_picks_smallest_cidr() { + let ip_addr = "192.168.0.4".parse().unwrap(); + let networks = vec![ + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "net-1".to_string(), + namespace: "ns".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/24".parse().unwrap(), + except: None, + }], + name: "net-2".to_string(), + namespace: "ns".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + ]; + + let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); + assert_eq!(resolved.unwrap().name, "net-2".to_string()) + } + + #[test] + fn test_picks_local_ns() { + let ip_addr = "192.168.0.4".parse().unwrap(); + let networks = vec![ + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "net-1".to_string(), + namespace: "ns-1".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/24".parse().unwrap(), + except: None, + }], + name: "net-2".to_string(), + namespace: "ns".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + ]; + + let resolved = resolve_egress_network(ip_addr, "ns-1".into(), 
networks.iter()); + assert_eq!(resolved.unwrap().name, "net-1".to_string()) + } + + #[test] + fn test_picks_older_resource() { + let ip_addr = "192.168.0.4".parse().unwrap(); + let networks = vec![ + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "net-1".to_string(), + namespace: "ns".to_string(), + creation_timestamp: Some(DateTime::::MAX_UTC), + traffic_policy: TrafficPolicy::Allow, + }, + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "net-2".to_string(), + namespace: "ns".to_string(), + creation_timestamp: Some(DateTime::::MIN_UTC), + traffic_policy: TrafficPolicy::Allow, + }, + ]; + + let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); + assert_eq!(resolved.unwrap().name, "net-2".to_string()) + } + + #[test] + fn test_picks_alphabetical_order() { + let ip_addr = "192.168.0.4".parse().unwrap(); + let networks = vec![ + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "b".to_string(), + namespace: "a".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "d".to_string(), + namespace: "c".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + ]; + + let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); + assert_eq!(resolved.unwrap().name, "b".to_string()) + } + + #[test] + fn test_respects_exception() { + let ip_addr = "192.168.0.4".parse().unwrap(); + let networks = vec![ + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: Some(vec!["192.168.0.4".parse().unwrap()]), + }], + name: "b".to_string(), + namespace: "a".to_string(), + creation_timestamp: None, + traffic_policy: 
TrafficPolicy::Allow, + }, + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "d".to_string(), + namespace: "c".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + ]; + + let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); + assert_eq!(resolved.unwrap().name, "d".to_string()) + } +} diff --git a/policy-controller/k8s/index/src/outbound/index/grpc.rs b/policy-controller/k8s/index/src/outbound/index/grpc.rs index f9346686831cb..e4143c18d2d41 100644 --- a/policy-controller/k8s/index/src/outbound/index/grpc.rs +++ b/policy-controller/k8s/index/src/outbound/index/grpc.rs @@ -1,7 +1,7 @@ use std::time; use super::http::{convert_backend, convert_gateway_filter}; -use super::{parse_duration, parse_timeouts, ServiceInfo, ServiceRef}; +use super::{parse_duration, parse_timeouts, ResourceInfo, ResourceRef}; use crate::{routes, ClusterInfo}; use ahash::AHashMap as HashMap; use anyhow::{bail, Result}; @@ -16,7 +16,7 @@ pub(super) fn convert_route( ns: &str, route: gateway::GrpcRoute, cluster: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, ) -> Result { let timeouts = parse_timeouts(route.annotations())?; let retry = parse_grpc_retry(route.annotations())?; @@ -26,7 +26,7 @@ pub(super) fn convert_route( .hostnames .into_iter() .flatten() - .map(routes::http::host_match) + .map(routes::host_match) .collect(); let rules = route @@ -39,7 +39,7 @@ pub(super) fn convert_route( ns, rule, cluster, - service_info, + resource_info, timeouts.clone(), retry.clone(), ) @@ -59,7 +59,7 @@ fn convert_rule( ns: &str, rule: gateway::GrpcRouteRule, cluster: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, timeouts: RouteTimeouts, retry: Option>, ) -> Result> { @@ -74,7 +74,7 @@ fn convert_rule( .backend_refs .into_iter() .flatten() - .filter_map(|b| convert_backend(ns, b, cluster, service_info)) + .filter_map(|b| convert_backend(ns, 
b, cluster, resource_info)) .collect(); let filters = rule diff --git a/policy-controller/k8s/index/src/outbound/index/http.rs b/policy-controller/k8s/index/src/outbound/index/http.rs index 8351d1136f255..b394e7b413246 100644 --- a/policy-controller/k8s/index/src/outbound/index/http.rs +++ b/policy-controller/k8s/index/src/outbound/index/http.rs @@ -1,6 +1,6 @@ use std::{num::NonZeroU16, time}; -use super::{is_service, parse_duration, parse_timeouts, ServiceInfo, ServiceRef}; +use super::{parse_duration, parse_timeouts, ResourceInfo, ResourceKind, ResourceRef}; use crate::{ routes::{self, HttpRouteResource}, ClusterInfo, @@ -11,7 +11,7 @@ use kube::ResourceExt; use linkerd_policy_controller_core::{ outbound::{ Backend, Filter, HttpRetryCondition, OutboundRoute, OutboundRouteRule, RouteRetry, - RouteTimeouts, WeightedService, + RouteTimeouts, WeightedEgressNetwork, WeightedService, }, routes::HttpRouteMatch, }; @@ -21,7 +21,7 @@ pub(super) fn convert_route( ns: &str, route: HttpRouteResource, cluster: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, ) -> Result> { match route { HttpRouteResource::LinkerdHttp(route) => { @@ -33,7 +33,7 @@ pub(super) fn convert_route( .hostnames .into_iter() .flatten() - .map(routes::http::host_match) + .map(routes::host_match) .collect(); let rules = route @@ -46,7 +46,7 @@ pub(super) fn convert_route( ns, r, cluster, - service_info, + resource_info, timeouts.clone(), retry.clone(), ) @@ -70,7 +70,7 @@ pub(super) fn convert_route( .hostnames .into_iter() .flatten() - .map(routes::http::host_match) + .map(routes::host_match) .collect(); let rules = route @@ -83,7 +83,7 @@ pub(super) fn convert_route( ns, r, cluster, - service_info, + resource_info, timeouts.clone(), retry.clone(), ) @@ -105,7 +105,7 @@ fn convert_linkerd_rule( ns: &str, rule: policy::httproute::HttpRouteRule, cluster: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, mut timeouts: RouteTimeouts, retry: Option>, ) -> Result> { @@ 
-120,7 +120,7 @@ fn convert_linkerd_rule( .backend_refs .into_iter() .flatten() - .filter_map(|b| convert_backend(ns, b, cluster, service_info)) + .filter_map(|b| convert_backend(ns, b, cluster, resource_info)) .collect(); let filters = rule @@ -156,7 +156,7 @@ fn convert_gateway_rule( ns: &str, rule: gateway::HttpRouteRule, cluster: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, timeouts: RouteTimeouts, retry: Option>, ) -> Result> { @@ -171,7 +171,7 @@ fn convert_gateway_rule( .backend_refs .into_iter() .flatten() - .filter_map(|b| convert_backend(ns, b, cluster, service_info)) + .filter_map(|b| convert_backend(ns, b, cluster, resource_info)) .collect(); let filters = rule @@ -194,46 +194,35 @@ pub(super) fn convert_backend>( ns: &str, backend: BackendRef, cluster: &ClusterInfo, - services: &HashMap, + resources: &HashMap, ) -> Option { let backend = backend.into(); let filters = backend.filters; let backend = backend.backend_ref?; - if !is_backend_service(&backend.inner) { - return Some(Backend::Invalid { - weight: backend.weight.unwrap_or(1).into(), - message: format!( - "unsupported backend type {group} {kind}", - group = backend.inner.group.as_deref().unwrap_or("core"), - kind = backend.inner.kind.as_deref().unwrap_or(""), - ), - }); - } - - let name = backend.inner.name; - let weight = backend.weight.unwrap_or(1); - // The gateway API dictates: - // - // Port is required when the referent is a Kubernetes Service. 
- let port = match backend - .inner - .port - .and_then(|p| NonZeroU16::try_from(p).ok()) - { - Some(port) => port, + let backend_kind = match super::backend_kind(&backend.inner) { + Some(backend_kind) => backend_kind, None => { return Some(Backend::Invalid { - weight: weight.into(), - message: format!("missing port for backend Service {name}"), - }) + weight: backend.weight.unwrap_or(1).into(), + message: format!( + "unsupported backend type {group} {kind}", + group = backend.inner.group.as_deref().unwrap_or("core"), + kind = backend.inner.kind.as_deref().unwrap_or(""), + ), + }); } }; - let service_ref = ServiceRef { - name: name.clone(), + + let backend_ref = ResourceRef { + name: backend.inner.name.clone(), namespace: backend.inner.namespace.unwrap_or_else(|| ns.to_string()), + kind: backend_kind.clone(), }; + let name = backend.inner.name; + let weight = backend.weight.unwrap_or(1); + let filters = match filters .into_iter() .flatten() @@ -249,15 +238,45 @@ pub(super) fn convert_backend>( } }; - Some(Backend::Service(WeightedService { - weight: weight.into(), - authority: cluster.service_dns_authority(&service_ref.namespace, &name, port), - name, - namespace: service_ref.namespace.to_string(), - port, - filters, - exists: services.contains_key(&service_ref), - })) + let port = backend + .inner + .port + .and_then(|p| NonZeroU16::try_from(p).ok()); + + match backend_kind { + ResourceKind::Service => { + // The gateway API dictates: + // + // Port is required when the referent is a Kubernetes Service. 
+ let port = match port { + Some(port) => port, + None => { + return Some(Backend::Invalid { + weight: weight.into(), + message: format!("missing port for backend Service {name}"), + }) + } + }; + + Some(Backend::Service(WeightedService { + weight: weight.into(), + authority: cluster.service_dns_authority(&backend_ref.namespace, &name, port), + name, + namespace: backend_ref.namespace.to_string(), + port, + filters, + exists: resources.contains_key(&backend_ref), + })) + } + ResourceKind::EgressNetwork => Some(Backend::EgressNetwork(WeightedEgressNetwork { + weight: weight.into(), + name, + namespace: backend_ref.namespace.to_string(), + port, + filters, + exists: resources.contains_key(&backend_ref), + })), + } } fn convert_linkerd_filter(filter: policy::httproute::HttpRouteFilter) -> Result { @@ -320,15 +339,6 @@ pub(crate) fn convert_gateway_filter Ok(filter) } -#[inline] -fn is_backend_service(backend: &gateway::BackendObjectReference) -> bool { - is_service( - backend.group.as_deref(), - // Backends default to `Service` if no kind is specified. 
- backend.kind.as_deref().unwrap_or("Service"), - ) -} - pub fn parse_http_retry( annotations: &std::collections::BTreeMap, ) -> Result>> { diff --git a/policy-controller/k8s/index/src/outbound/index/metrics.rs b/policy-controller/k8s/index/src/outbound/index/metrics.rs index d7f698a78cc0d..55e6730344100 100644 --- a/policy-controller/k8s/index/src/outbound/index/metrics.rs +++ b/policy-controller/k8s/index/src/outbound/index/metrics.rs @@ -33,7 +33,7 @@ impl Collector for Instrumented { None, MetricType::Gauge, )?; - let service_infos = ConstGauge::new(this.service_info.len() as u32); + let service_infos = ConstGauge::new(this.resource_info.len() as u32); service_infos.encode(service_info_encoder)?; let mut service_route_encoder = encoder.encode_descriptor( @@ -57,7 +57,7 @@ impl Collector for Instrumented { )?; for (ns, index) in &this.namespaces.by_ns { let labels = vec![("namespace", ns.as_str())]; - let service_port_routes = ConstGauge::new(index.service_port_routes.len() as u32); + let service_port_routes = ConstGauge::new(index.resource_port_routes.len() as u32); let service_port_route_encoder = service_port_route_encoder.encode_family(&labels)?; service_port_routes.encode(service_port_route_encoder)?; } diff --git a/policy-controller/k8s/index/src/outbound/index/tcp.rs b/policy-controller/k8s/index/src/outbound/index/tcp.rs new file mode 100644 index 0000000000000..4736a9137c3a5 --- /dev/null +++ b/policy-controller/k8s/index/src/outbound/index/tcp.rs @@ -0,0 +1,106 @@ +use std::num::NonZeroU16; + +use super::{ResourceInfo, ResourceKind, ResourceRef}; +use crate::ClusterInfo; +use ahash::AHashMap as HashMap; +use anyhow::{bail, Result}; +use linkerd_policy_controller_core::outbound::{Backend, WeightedEgressNetwork, WeightedService}; +use linkerd_policy_controller_core::outbound::{TcpRoute, TcpRouteRule}; +use linkerd_policy_controller_k8s_api::{gateway, Time}; + +pub(super) fn convert_route( + ns: &str, + route: gateway::TcpRoute, + cluster: &ClusterInfo, + 
resource_info: &HashMap, +) -> Result { + if route.spec.rules.len() != 1 { + bail!("TCPRoute needs to have one rule"); + } + + let rule = route.spec.rules.first().expect("already checked"); + + let backends = rule + .backend_refs + .clone() + .into_iter() + .filter_map(|b| convert_backend(ns, b, cluster, resource_info)) + .collect(); + + let creation_timestamp = route.metadata.creation_timestamp.map(|Time(t)| t); + + Ok(TcpRoute { + rule: TcpRouteRule { backends }, + creation_timestamp, + }) +} + +pub(super) fn convert_backend( + ns: &str, + backend: gateway::BackendRef, + cluster: &ClusterInfo, + resources: &HashMap, +) -> Option { + let backend_kind = match super::backend_kind(&backend.inner) { + Some(backend_kind) => backend_kind, + None => { + return Some(Backend::Invalid { + weight: backend.weight.unwrap_or(1).into(), + message: format!( + "unsupported backend type {group} {kind}", + group = backend.inner.group.as_deref().unwrap_or("core"), + kind = backend.inner.kind.as_deref().unwrap_or(""), + ), + }); + } + }; + + let backend_ref = ResourceRef { + name: backend.inner.name.clone(), + namespace: backend.inner.namespace.unwrap_or_else(|| ns.to_string()), + kind: backend_kind.clone(), + }; + + let name = backend.inner.name; + let weight = backend.weight.unwrap_or(1); + + let port = backend + .inner + .port + .and_then(|p| NonZeroU16::try_from(p).ok()); + + match backend_kind { + ResourceKind::Service => { + // The gateway API dictates: + // + // Port is required when the referent is a Kubernetes Service. 
+ let port = match port { + Some(port) => port, + None => { + return Some(Backend::Invalid { + weight: weight.into(), + message: format!("missing port for backend Service {name}"), + }) + } + }; + + Some(Backend::Service(WeightedService { + weight: weight.into(), + authority: cluster.service_dns_authority(&backend_ref.namespace, &name, port), + name, + namespace: backend_ref.namespace.to_string(), + port, + filters: vec![], + exists: resources.contains_key(&backend_ref), + })) + } + ResourceKind::EgressNetwork => Some(Backend::EgressNetwork(WeightedEgressNetwork { + weight: weight.into(), + name, + namespace: backend_ref.namespace.to_string(), + port, + filters: vec![], + exists: resources.contains_key(&backend_ref), + })), + } +} diff --git a/policy-controller/k8s/index/src/outbound/index/tls.rs b/policy-controller/k8s/index/src/outbound/index/tls.rs new file mode 100644 index 0000000000000..393c5ef9db3c9 --- /dev/null +++ b/policy-controller/k8s/index/src/outbound/index/tls.rs @@ -0,0 +1,43 @@ +use super::tcp::convert_backend; +use super::{ResourceInfo, ResourceRef}; +use crate::{routes, ClusterInfo}; +use ahash::AHashMap as HashMap; +use anyhow::{bail, Result}; +use linkerd_policy_controller_core::outbound::{TcpRouteRule, TlsRoute}; +use linkerd_policy_controller_k8s_api::{gateway, Time}; + +pub(super) fn convert_route( + ns: &str, + route: gateway::TlsRoute, + cluster: &ClusterInfo, + resource_info: &HashMap, +) -> Result { + if route.spec.rules.len() != 1 { + bail!("TLSRoute needs to have one rule"); + } + + let rule = route.spec.rules.first().expect("already checked"); + + let hostnames = route + .spec + .hostnames + .into_iter() + .flatten() + .map(routes::host_match) + .collect(); + + let backends = rule + .backend_refs + .clone() + .into_iter() + .filter_map(|b| convert_backend(ns, b, cluster, resource_info)) + .collect(); + + let creation_timestamp = route.metadata.creation_timestamp.map(|Time(t)| t); + + Ok(TlsRoute { + hostnames, + rule: TcpRouteRule { 
backends }, + creation_timestamp, + }) +} diff --git a/policy-controller/k8s/index/src/outbound/tests.rs b/policy-controller/k8s/index/src/outbound/tests.rs index 552eaf05d1be4..6994c2a96fa7d 100644 --- a/policy-controller/k8s/index/src/outbound/tests.rs +++ b/policy-controller/k8s/index/src/outbound/tests.rs @@ -1,14 +1,20 @@ -use std::sync::Arc; +use std::{sync::Arc, vec}; use crate::{ defaults::DefaultPolicy, outbound::index::{Index, SharedIndex}, ClusterInfo, }; +use k8s_openapi::chrono::Utc; use kubert::index::IndexNamespacedResource; +use linkerd_policy_controller_core::outbound::{Kind, ResourceTarget}; use linkerd_policy_controller_core::IpNet; -use linkerd_policy_controller_k8s_api::{self as k8s}; +use linkerd_policy_controller_k8s_api::{ + self as k8s, + policy::{self, EgressNetwork}, +}; use tokio::time; +use tracing::Level; mod routes; @@ -34,6 +40,30 @@ pub fn mk_service(ns: impl ToString, name: impl ToString, port: i32) -> k8s::Ser } } +pub fn mk_egress_network(ns: impl ToString, name: impl ToString) -> policy::EgressNetwork { + policy::EgressNetwork { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(name.to_string()), + ..Default::default() + }, + spec: policy::EgressNetworkSpec { + traffic_policy: policy::TrafficPolicy::Allow, + networks: None, + }, + status: Some(policy::EgressNetworkStatus { + conditions: vec![k8s::Condition { + last_transition_time: k8s::Time(Utc::now()), + message: "".to_string(), + observed_generation: None, + reason: "Accepted".to_string(), + status: "True".to_string(), + type_: "Accepted".to_string(), + }], + }), + } +} + impl TestConfig { fn from_default_policy(default_policy: DefaultPolicy) -> Self { Self::from_default_policy_with_probes(default_policy, vec![]) @@ -68,3 +98,119 @@ impl Default for TestConfig { }) } } + +#[test] +fn switch_to_another_egress_network_parent() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + 
// Create network b. + let network_b = mk_egress_network("ns", "b"); + test.index.write().apply(network_b); + + let (ns, name) = test + .index + .write() + .lookup_egress_network("192.168.0.1".parse().unwrap(), "ns".to_string()) + .expect("should resolve"); + + assert_eq!(ns, "ns".to_string()); + assert_eq!(name, "b".to_string()); + + let mut rx_b = test + .index + .write() + .outbound_policy_rx(ResourceTarget { + name, + namespace: ns.clone(), + port: 8080.try_into().unwrap(), + source_namespace: ns, + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), + }) + .expect("b.ns should exist"); + + // first resolution is for network B + let policy_b = rx_b.borrow_and_update(); + assert_eq!(policy_b.parent_namespace(), "ns"); + assert_eq!(policy_b.parent_name(), "b"); + drop(policy_b); + + // Create network a. + let network_a = mk_egress_network("ns", "a"); + test.index.write().apply(network_a); + + // watch should be dropped at this point + assert!(rx_b.has_changed().is_err()); + + // now a new resolution should resolve network a + + let (ns, name) = test + .index + .write() + .lookup_egress_network("192.168.0.1".parse().unwrap(), "ns".to_string()) + .expect("should resolve"); + + let mut rx_a = test + .index + .write() + .outbound_policy_rx(ResourceTarget { + name, + namespace: ns.clone(), + port: 8080.try_into().unwrap(), + source_namespace: ns, + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), + }) + .expect("a.ns should exist"); + + // second resolution is for network A + let policy_b = rx_a.borrow_and_update(); + assert_eq!(policy_b.parent_namespace(), "ns"); + assert_eq!(policy_b.parent_name(), "a"); +} + +#[test] +fn fallback_rx_closed_when_egress_net_created() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + + let fallback_rx = test.index.read().fallback_policy_rx(); + assert!(fallback_rx.has_changed().is_ok()); + + // Create network. 
+ let network = mk_egress_network("ns", "egress-net"); + test.index.write().apply(network); + + assert!(fallback_rx.has_changed().is_err()); +} + +#[test] +fn fallback_rx_closed_when_egress_net_deleted() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + + // Create network. + let network = mk_egress_network("ns", "egress-net"); + test.index.write().apply(network); + + let fallback_rx = test.index.read().fallback_policy_rx(); + assert!(fallback_rx.has_changed().is_ok()); + + >::delete( + &mut test.index.write(), + "ns".into(), + "egress-net".into(), + ); + + assert!(fallback_rx.has_changed().is_err()); +} diff --git a/policy-controller/k8s/index/src/outbound/tests/routes.rs b/policy-controller/k8s/index/src/outbound/tests/routes.rs index f4e23ff7ca636..ce47732b3c5c5 100644 --- a/policy-controller/k8s/index/src/outbound/tests/routes.rs +++ b/policy-controller/k8s/index/src/outbound/tests/routes.rs @@ -1,2 +1,9 @@ mod grpc; mod http; +mod tcp; +mod tls; + +enum BackendKind { + Egress, + Service, +} diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs b/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs index d8245bdb10bd4..ff2051b8e60d1 100644 --- a/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs +++ b/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs @@ -1,6 +1,6 @@ use kube::Resource; use linkerd_policy_controller_core::{ - outbound::{Backend, WeightedService}, + outbound::{Backend, Kind, ResourceTarget, WeightedEgressNetwork, WeightedService}, routes::GroupKindNamespaceName, POLICY_CONTROLLER_NAME, }; @@ -22,18 +22,26 @@ fn backend_service() { test.index.write().apply(apex); // Create httproute. 
- let route = mk_route("ns", "route", 8080, "apex", "backend"); + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "backend", + super::BackendKind::Service, + ); test.index.write().apply(route); let mut rx = test .index .write() - .outbound_policy_rx( - "apex".to_string(), - "ns".to_string(), - 8080.try_into().unwrap(), - "ns".to_string(), - ) + .outbound_policy_rx(ResourceTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::Service, + }) .expect("apex.ns should exist"); { @@ -96,15 +104,85 @@ fn backend_service() { } } +#[test] +fn backend_egress_network() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create apex service. + let apex = mk_egress_network("ns", "apex"); + test.index.write().apply(apex); + + // Create httproute. + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "apex", + super::BackendKind::Egress, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(ResourceTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .grpc_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::GrpcRoute::group(&()), + kind: k8s_gateway_api::GrpcRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rules + .first() + .expect("rule should exist") + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Invalid { .. } => &false, + Backend::EgressNetwork(WeightedEgressNetwork { exists, .. 
}) => exists, + _ => panic!("backend should be an egress network, but got {backend:?}"), + }; + + // Backend should exist. + assert!(exists); + } +} + fn mk_route( ns: impl ToString, name: impl ToString, port: u16, parent: impl ToString, - backend: impl ToString, + backend_name: impl ToString, + backend: super::BackendKind, ) -> k8s_gateway_api::GrpcRoute { - use chrono::Utc; use k8s::{policy::httproute::*, Time}; + let (group, kind) = match backend { + super::BackendKind::Service => ("core".to_string(), "Service".to_string()), + super::BackendKind::Egress => { + ("policy.linkerd.io".to_string(), "EgressNetwork".to_string()) + } + }; k8s_gateway_api::GrpcRoute { metadata: k8s::ObjectMeta { @@ -116,8 +194,8 @@ fn mk_route( spec: k8s_gateway_api::GrpcRouteSpec { inner: CommonRouteSpec { parent_refs: Some(vec![ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group.clone()), + kind: Some(kind.clone()), namespace: Some(ns.to_string()), name: parent.to_string(), section_name: None, @@ -138,10 +216,10 @@ fn mk_route( filters: None, weight: None, inner: BackendObjectReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group.clone()), + kind: Some(kind.clone()), namespace: Some(ns.to_string()), - name: backend.to_string(), + name: backend_name.to_string(), port: Some(port), }, }]), @@ -151,8 +229,8 @@ fn mk_route( inner: RouteStatus { parents: vec![k8s::gateway::RouteParentStatus { parent_ref: ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group.clone()), + kind: Some(kind.clone()), namespace: Some(ns.to_string()), name: parent.to_string(), section_name: None, diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/http.rs b/policy-controller/k8s/index/src/outbound/tests/routes/http.rs index dc672b339781f..96107e51d8fb0 100644 --- a/policy-controller/k8s/index/src/outbound/tests/routes/http.rs +++ 
b/policy-controller/k8s/index/src/outbound/tests/routes/http.rs @@ -1,6 +1,6 @@ use kube::Resource; use linkerd_policy_controller_core::{ - outbound::{Backend, WeightedService}, + outbound::{Backend, Kind, ResourceTarget, WeightedEgressNetwork, WeightedService}, routes::GroupKindNamespaceName, POLICY_CONTROLLER_NAME, }; @@ -23,18 +23,26 @@ fn backend_service() { test.index.write().apply(apex); // Create httproute. - let route = mk_route("ns", "route", 8080, "apex", "backend"); + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "backend", + super::BackendKind::Service, + ); test.index.write().apply(route); let mut rx = test .index .write() - .outbound_policy_rx( - "apex".to_string(), - "ns".to_string(), - 8080.try_into().unwrap(), - "ns".to_string(), - ) + .outbound_policy_rx(ResourceTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::Service, + }) .expect("apex.ns should exist"); { @@ -98,15 +106,86 @@ fn backend_service() { } } +#[test] +fn backend_egress_network() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + + // Create apex service. + let apex = mk_egress_network("ns", "apex"); + test.index.write().apply(apex); + + // Create httproute. 
+ let route = mk_route( + "ns", + "route", + 8080, + "apex", + "apex", + super::BackendKind::Egress, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(ResourceTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .http_routes + .get(&GroupKindNamespaceName { + group: k8s::policy::HttpRoute::group(&()), + kind: k8s::policy::HttpRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rules + .first() + .expect("rule should exist") + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Invalid { .. } => &false, + Backend::EgressNetwork(WeightedEgressNetwork { exists, .. }) => exists, + _ => panic!("backend should be an egress network, but got {backend:?}"), + }; + + // Backend should exist. 
+ assert!(exists); + } +} + fn mk_route( ns: impl ToString, name: impl ToString, port: u16, parent: impl ToString, - backend: impl ToString, + backend_name: impl ToString, + backend: super::BackendKind, ) -> k8s::policy::HttpRoute { - use chrono::Utc; use k8s::{policy::httproute::*, Time}; + let (group, kind) = match backend { + super::BackendKind::Service => ("core".to_string(), "Service".to_string()), + super::BackendKind::Egress => { + ("policy.linkerd.io".to_string(), "EgressNetwork".to_string()) + } + }; HttpRoute { metadata: k8s::ObjectMeta { @@ -118,8 +197,8 @@ fn mk_route( spec: HttpRouteSpec { inner: CommonRouteSpec { parent_refs: Some(vec![ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group.clone()), + kind: Some(kind.clone()), namespace: Some(ns.to_string()), name: parent.to_string(), section_name: None, @@ -141,10 +220,10 @@ fn mk_route( backend_ref: Some(BackendRef { weight: None, inner: BackendObjectReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group.clone()), + kind: Some(kind.clone()), namespace: Some(ns.to_string()), - name: backend.to_string(), + name: backend_name.to_string(), port: Some(port), }, }), @@ -157,8 +236,8 @@ fn mk_route( inner: RouteStatus { parents: vec![k8s::gateway::RouteParentStatus { parent_ref: ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group), + kind: Some(kind), namespace: Some(ns.to_string()), name: parent.to_string(), section_name: None, diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs b/policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs new file mode 100644 index 0000000000000..f9d12e2e6eed3 --- /dev/null +++ b/policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs @@ -0,0 +1,236 @@ +use kube::Resource; +use linkerd_policy_controller_core::{ + outbound::{Backend, Kind, ResourceTarget, WeightedEgressNetwork, 
WeightedService}, + routes::GroupKindNamespaceName, + POLICY_CONTROLLER_NAME, +}; +use linkerd_policy_controller_k8s_api::gateway as k8s_gateway_api; +use tracing::Level; + +use super::super::*; + +#[test] +fn backend_service() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create apex service. + let apex = mk_service("ns", "apex", 8080); + test.index.write().apply(apex); + + // Create tcproute. + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "backend", + super::BackendKind::Service, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(ResourceTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::Service, + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tcp_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TcpRoute::group(&()), + kind: k8s_gateway_api::TcpRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Service(WeightedService { exists, .. }) => exists, + _ => panic!("backend should be a service"), + }; + + // Backend should not exist. + assert!(!exists); + } + + // Create backend service. 
+ let backend = mk_service("ns", "backend", 8080); + test.index.write().apply(backend); + assert!(rx.has_changed().unwrap()); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tcp_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TcpRoute::group(&()), + kind: k8s_gateway_api::TcpRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Service(WeightedService { exists, .. }) => exists, + _ => panic!("backend should be a service"), + }; + + // Backend should exist. + assert!(exists); + } +} + +#[test] +fn backend_egress_network() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create apex service. + let apex = mk_egress_network("ns", "apex"); + test.index.write().apply(apex); + + // Create tcproute. + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "apex", + super::BackendKind::Egress, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(ResourceTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tcp_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TcpRoute::group(&()), + kind: k8s_gateway_api::TcpRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Invalid { .. } => &false, + Backend::EgressNetwork(WeightedEgressNetwork { exists, .. 
}) => exists, + _ => panic!("backend should be an egress network, but got {backend:?}"), + }; + + // Backend should exist. + assert!(exists); + } +} + +fn mk_route( + ns: impl ToString, + name: impl ToString, + port: u16, + parent: impl ToString, + backend_name: impl ToString, + backend: super::BackendKind, +) -> k8s_gateway_api::TcpRoute { + use k8s::{policy::httproute::*, Time}; + let (group, kind) = match backend { + super::BackendKind::Service => ("core".to_string(), "Service".to_string()), + super::BackendKind::Egress => { + ("policy.linkerd.io".to_string(), "EgressNetwork".to_string()) + } + }; + + k8s_gateway_api::TcpRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(name.to_string()), + creation_timestamp: Some(Time(Utc::now())), + ..Default::default() + }, + spec: k8s_gateway_api::TcpRouteSpec { + inner: CommonRouteSpec { + parent_refs: Some(vec![ParentReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: parent.to_string(), + section_name: None, + port: Some(port), + }]), + }, + rules: vec![k8s_gateway_api::TcpRouteRule { + backend_refs: vec![k8s_gateway_api::BackendRef { + weight: None, + inner: BackendObjectReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: backend_name.to_string(), + port: Some(port), + }, + }], + }], + }, + status: Some(k8s_gateway_api::TcpRouteStatus { + inner: RouteStatus { + parents: vec![k8s::gateway::RouteParentStatus { + parent_ref: ParentReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: parent.to_string(), + section_name: None, + port: Some(port), + }, + controller_name: POLICY_CONTROLLER_NAME.to_string(), + conditions: vec![k8s::Condition { + last_transition_time: Time(chrono::DateTime::::MIN_UTC), + message: "".to_string(), + observed_generation: None, + reason: "Accepted".to_string(), + status: 
"True".to_string(), + type_: "Accepted".to_string(), + }], + }], + }, + }), + } +} diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/tls.rs b/policy-controller/k8s/index/src/outbound/tests/routes/tls.rs new file mode 100644 index 0000000000000..914d7307ed79b --- /dev/null +++ b/policy-controller/k8s/index/src/outbound/tests/routes/tls.rs @@ -0,0 +1,237 @@ +use kube::Resource; +use linkerd_policy_controller_core::{ + outbound::{Backend, Kind, ResourceTarget, WeightedEgressNetwork, WeightedService}, + routes::GroupKindNamespaceName, + POLICY_CONTROLLER_NAME, +}; +use linkerd_policy_controller_k8s_api::gateway as k8s_gateway_api; +use tracing::Level; + +use super::super::*; + +#[test] +fn backend_service() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create apex service. + let apex = mk_service("ns", "apex", 8080); + test.index.write().apply(apex); + + // Create tlsroute. + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "backend", + super::BackendKind::Service, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(ResourceTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::Service, + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tls_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TlsRoute::group(&()), + kind: k8s_gateway_api::TlsRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Service(WeightedService { exists, .. }) => exists, + _ => panic!("backend should be a service"), + }; + + // Backend should not exist. + assert!(!exists); + } + + // Create backend service. 
+ let backend = mk_service("ns", "backend", 8080); + test.index.write().apply(backend); + assert!(rx.has_changed().unwrap()); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tls_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TlsRoute::group(&()), + kind: k8s_gateway_api::TlsRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Service(WeightedService { exists, .. }) => exists, + _ => panic!("backend should be a service"), + }; + + // Backend should exist. + assert!(exists); + } +} + +#[test] +fn backend_egress_network() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create apex service. + let apex = mk_egress_network("ns", "apex"); + test.index.write().apply(apex); + + // Create tlsroute. + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "apex", + super::BackendKind::Egress, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(ResourceTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tls_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TlsRoute::group(&()), + kind: k8s_gateway_api::TlsRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Invalid { .. } => &false, + Backend::EgressNetwork(WeightedEgressNetwork { exists, .. 
}) => exists, + _ => panic!("backend should be an egress network, but got {backend:?}"), + }; + + // Backend should exist. + assert!(exists); + } +} + +fn mk_route( + ns: impl ToString, + name: impl ToString, + port: u16, + parent: impl ToString, + backend_name: impl ToString, + backend: super::BackendKind, +) -> k8s_gateway_api::TlsRoute { + use k8s::{policy::httproute::*, Time}; + let (group, kind) = match backend { + super::BackendKind::Service => ("core".to_string(), "Service".to_string()), + super::BackendKind::Egress => { + ("policy.linkerd.io".to_string(), "EgressNetwork".to_string()) + } + }; + + k8s_gateway_api::TlsRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(name.to_string()), + creation_timestamp: Some(Time(Utc::now())), + ..Default::default() + }, + spec: k8s_gateway_api::TlsRouteSpec { + inner: CommonRouteSpec { + parent_refs: Some(vec![ParentReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: parent.to_string(), + section_name: None, + port: Some(port), + }]), + }, + hostnames: None, + rules: vec![k8s_gateway_api::TlsRouteRule { + backend_refs: vec![k8s_gateway_api::BackendRef { + weight: None, + inner: BackendObjectReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: backend_name.to_string(), + port: Some(port), + }, + }], + }], + }, + status: Some(k8s_gateway_api::TlsRouteStatus { + inner: RouteStatus { + parents: vec![k8s::gateway::RouteParentStatus { + parent_ref: ParentReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: parent.to_string(), + section_name: None, + port: Some(port), + }, + controller_name: POLICY_CONTROLLER_NAME.to_string(), + conditions: vec![k8s::Condition { + last_transition_time: Time(chrono::DateTime::::MIN_UTC), + message: "".to_string(), + observed_generation: None, + reason: "Accepted".to_string(), + 
status: "True".to_string(), + type_: "Accepted".to_string(), + }], + }], + }, + }), + } +} diff --git a/policy-controller/k8s/index/src/routes.rs b/policy-controller/k8s/index/src/routes.rs index 538d2bf4e8513..37715717cf3af 100644 --- a/policy-controller/k8s/index/src/routes.rs +++ b/policy-controller/k8s/index/src/routes.rs @@ -1,4 +1,4 @@ -use linkerd_policy_controller_core::routes::{GroupKindName, GroupKindNamespaceName}; +use linkerd_policy_controller_core::routes::{GroupKindName, GroupKindNamespaceName, HostMatch}; use linkerd_policy_controller_k8s_api::{gateway as api, policy, Resource, ResourceExt}; pub mod grpc; @@ -77,3 +77,17 @@ impl ExplicitGKN for str { GroupKindName { group, kind, name } } } + +pub fn host_match(hostname: api::Hostname) -> HostMatch { + if hostname.starts_with("*.") { + let mut reverse_labels = hostname + .split('.') + .skip(1) + .map(|label| label.to_string()) + .collect::>(); + reverse_labels.reverse(); + HostMatch::Suffix { reverse_labels } + } else { + HostMatch::Exact(hostname) + } +} diff --git a/policy-controller/k8s/index/src/routes/http.rs b/policy-controller/k8s/index/src/routes/http.rs index d01839f6f566a..4f24405906919 100644 --- a/policy-controller/k8s/index/src/routes/http.rs +++ b/policy-controller/k8s/index/src/routes/http.rs @@ -54,20 +54,6 @@ pub fn path_match(path_match: api::HttpPathMatch) -> Result { } } -pub fn host_match(hostname: api::Hostname) -> routes::HostMatch { - if hostname.starts_with("*.") { - let mut reverse_labels = hostname - .split('.') - .skip(1) - .map(|label| label.to_string()) - .collect::>(); - reverse_labels.reverse(); - routes::HostMatch::Suffix { reverse_labels } - } else { - routes::HostMatch::Exact(hostname) - } -} - pub fn header_match(header_match: api::HttpHeaderMatch) -> Result { match header_match { api::HttpHeaderMatch::Exact { name, value } => { diff --git a/policy-controller/src/lib.rs b/policy-controller/src/lib.rs index d524a6578ed51..4a0fc24080b3d 100644 --- 
a/policy-controller/src/lib.rs +++ b/policy-controller/src/lib.rs @@ -9,12 +9,14 @@ use linkerd_policy_controller_core::inbound::{ DiscoverInboundServer, InboundServer, InboundServerStream, }; use linkerd_policy_controller_core::outbound::{ - DiscoverOutboundPolicy, OutboundDiscoverTarget, OutboundPolicy, OutboundPolicyStream, + DiscoverOutboundPolicy, ExternalPolicyStream, Kind, OutboundDiscoverTarget, OutboundPolicy, + OutboundPolicyStream, ResourceTarget, }; pub use linkerd_policy_controller_core::IpNet; pub use linkerd_policy_controller_grpc as grpc; pub use linkerd_policy_controller_k8s_api as k8s; pub use linkerd_policy_controller_k8s_index::{inbound, outbound, ClusterInfo, DefaultPolicy}; +use std::net::SocketAddr; use std::{net::IpAddr, num::NonZeroU16}; #[derive(Clone, Debug)] @@ -84,68 +86,73 @@ impl DiscoverInboundServer<(grpc::workload::Workload, NonZeroU16)> for InboundDi } #[async_trait::async_trait] -impl DiscoverOutboundPolicy for OutboundDiscover { +impl DiscoverOutboundPolicy for OutboundDiscover { async fn get_outbound_policy( &self, - OutboundDiscoverTarget { - service_name, - service_namespace, - service_port, - source_namespace, - }: OutboundDiscoverTarget, + resource: ResourceTarget, ) -> Result> { - let rx = match self.0.write().outbound_policy_rx( - service_name, - service_namespace, - service_port, - source_namespace, - ) { + let rx = match self.0.write().outbound_policy_rx(resource.clone()) { Ok(rx) => rx, Err(error) => { tracing::error!(%error, "failed to get outbound policy rx"); return Ok(None); } }; + let policy = (*rx.borrow()).clone(); Ok(Some(policy)) } async fn watch_outbound_policy( &self, - OutboundDiscoverTarget { - service_name, - service_namespace, - service_port, - source_namespace, - }: OutboundDiscoverTarget, + target: ResourceTarget, ) -> Result> { - match self.0.write().outbound_policy_rx( - service_name, - service_namespace, - service_port, - source_namespace, - ) { + match self.0.write().outbound_policy_rx(target) { 
Ok(rx) => Ok(Some(Box::pin(tokio_stream::wrappers::WatchStream::new(rx)))), Err(_) => Ok(None), } } + async fn watch_external_policy(&self) -> ExternalPolicyStream { + Box::pin(tokio_stream::wrappers::WatchStream::new( + self.0.read().fallback_policy_rx(), + )) + } + fn lookup_ip( &self, addr: IpAddr, port: NonZeroU16, source_namespace: String, ) -> Option { - self.0 - .read() - .lookup_service(addr) - .map( - |outbound::ServiceRef { name, namespace }| OutboundDiscoverTarget { - service_name: name, - service_namespace: namespace, - service_port: port, - source_namespace, - }, - ) + let index = self.0.read(); + if let Some((namespace, name)) = index.lookup_service(addr) { + return Some(OutboundDiscoverTarget::Resource(ResourceTarget { + name, + namespace, + port, + source_namespace, + kind: Kind::Service, + })); + } + + if let Some((namespace, name)) = index.lookup_egress_network(addr, source_namespace.clone()) + { + let original_dst = SocketAddr::new(addr, port.into()); + return Some(OutboundDiscoverTarget::Resource(ResourceTarget { + name, + namespace, + port, + source_namespace, + kind: Kind::EgressNetwork(original_dst), + })); + } + + if !index.is_address_in_cluster(addr) { + let original_dst = SocketAddr::new(addr, port.into()); + return Some(OutboundDiscoverTarget::External(original_dst)); + } + + None } } diff --git a/policy-controller/src/main.rs b/policy-controller/src/main.rs index 36eaca3d24c52..3606283e757af 100644 --- a/policy-controller/src/main.rs +++ b/policy-controller/src/main.rs @@ -296,7 +296,9 @@ async fn main() -> Result<()> { if api_resource_exists::(&runtime.client()).await { let tls_routes = runtime.watch_all::(watcher::Config::default()); - let tls_routes_indexes = IndexList::new(status_index.clone()).shared(); + let tls_routes_indexes = IndexList::new(status_index.clone()) + .push(outbound_index.clone()) + .shared(); tokio::spawn( kubert::index::namespaced(tls_routes_indexes.clone(), tls_routes) 
.instrument(info_span!("tlsroutes.gateway.networking.k8s.io")), @@ -309,7 +311,9 @@ async fn main() -> Result<()> { if api_resource_exists::(&runtime.client()).await { let tcp_routes = runtime.watch_all::(watcher::Config::default()); - let tcp_routes_indexes = IndexList::new(status_index.clone()).shared(); + let tcp_routes_indexes = IndexList::new(status_index.clone()) + .push(outbound_index.clone()) + .shared(); tokio::spawn( kubert::index::namespaced(tcp_routes_indexes.clone(), tcp_routes) .instrument(info_span!("tcproutes.gateway.networking.k8s.io")), @@ -330,7 +334,9 @@ async fn main() -> Result<()> { let egress_networks = runtime.watch_all::(watcher::Config::default()); - let egress_networks_indexes = IndexList::new(status_index.clone()).shared(); + let egress_networks_indexes = IndexList::new(status_index.clone()) + .push(outbound_index.clone()) + .shared(); tokio::spawn( kubert::index::namespaced(egress_networks_indexes, egress_networks) .instrument(info_span!("egressnetworks")), diff --git a/policy-test/src/grpc.rs b/policy-test/src/grpc.rs index 0a84464ff5462..a25a1800e02f2 100644 --- a/policy-test/src/grpc.rs +++ b/policy-test/src/grpc.rs @@ -295,7 +295,6 @@ impl OutboundPolicyClient { svc: &k8s::Service, port: u16, ) -> Result, tonic::Status> { - use std::net::Ipv4Addr; let address = svc .spec .as_ref() @@ -303,7 +302,17 @@ impl OutboundPolicyClient { .cluster_ip .as_ref() .expect("Service must have a cluster ip"); - let ip = address.parse::().unwrap(); + self.watch_ip(ns, address, port).await + } + + pub async fn watch_ip( + &mut self, + ns: &str, + addr: &str, + port: u16, + ) -> Result, tonic::Status> { + use std::net::Ipv4Addr; + let ip = addr.parse::().unwrap(); let rsp = self .client .watch(tonic::Request::new(outbound::TrafficSpec { diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs index b2dca83b24a38..e9838284f0448 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -75,6 +75,25 @@ where .expect("failed to create 
resource") } +/// Deletes a namespace-scoped resource. +pub async fn delete(client: &kube::Client, obj: T) +where + T: kube::Resource, + T: serde::Serialize + serde::de::DeserializeOwned + Clone + std::fmt::Debug, + T::DynamicType: Default, +{ + let params = kube::api::DeleteParams::default(); + let api = obj + .namespace() + .map(|ns| kube::Api::::namespaced(client.clone(), &ns)) + .unwrap_or_else(|| kube::Api::::default_namespaced(client.clone())); + + tracing::trace!(?obj, "Deleting"); + api.delete(&obj.name_any(), ¶ms) + .await + .expect("failed to delete resource"); +} + /// Updates a namespace-scoped resource. pub async fn update(client: &kube::Client, mut new: T) -> T where @@ -238,6 +257,16 @@ pub async fn create_service( create(client, svc).await } +/// Creates an egress network resource. +pub async fn create_egress_network( + client: &kube::Client, + ns: &str, + name: &str, +) -> k8s::policy::EgressNetwork { + let en = mk_egress_net(ns, name); + create(client, en).await +} + /// Creates a service resource. 
pub async fn create_opaque_service( client: &kube::Client, @@ -300,6 +329,21 @@ pub fn mk_service(ns: &str, name: &str, port: i32) -> k8s::Service { } } +pub fn mk_egress_net(ns: &str, name: &str) -> k8s::policy::EgressNetwork { + k8s::policy::EgressNetwork { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(name.to_string()), + ..Default::default() + }, + spec: k8s::policy::EgressNetworkSpec { + networks: None, + traffic_policy: k8s::policy::egress_network::TrafficPolicy::Allow, + }, + status: None, + } +} + #[track_caller] pub fn assert_svc_meta(meta: &Option<grpc::meta::Metadata>, svc: &k8s::Service, port: u16) { tracing::debug!(?meta, ?svc, port, "Asserting service metadata"); diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs index f70560a312ea8..e91150a65f8e1 100644 --- a/policy-test/tests/outbound_api_gateway.rs +++ b/policy-test/tests/outbound_api_gateway.rs @@ -1,10 +1,11 @@ use futures::prelude::*; use kube::ResourceExt; +use linkerd2_proxy_api::meta; use linkerd_policy_controller_k8s_api as k8s; use linkerd_policy_test::{ assert_default_accrual_backoff, assert_svc_meta, create, create_annotated_service, - create_cluster_scoped, create_opaque_service, create_service, delete_cluster_scoped, grpc, - mk_service, outbound_api::*, with_temp_ns, + create_cluster_scoped, create_egress_network, create_opaque_service, create_service, delete, + delete_cluster_scoped, grpc, mk_service, outbound_api::*, with_temp_ns, }; use maplit::{btreemap, convert_args}; use std::{collections::BTreeMap, time::Duration}; @@ -20,7 +21,7 @@ async fn service_does_not_exist() { // Build a service but don't apply it to the cluster. let mut svc = mk_service(&ns, "my-svc", 4191); // Give it a bogus cluster ip. 
- svc.spec.as_mut().unwrap().cluster_ip = Some("1.1.1.1".to_string()); + svc.spec.as_mut().unwrap().cluster_ip = Some("192.168.0.2".to_string()); let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; let rsp = policy_api.watch(&ns, &svc, 4191).await; @@ -31,6 +32,45 @@ async fn service_does_not_exist() { .await; } +#[tokio::test(flavor = "current_thread")] +async fn egress_switches_to_fallback() { + with_temp_ns(|client, ns| async move { + let egress_net = create_egress_network(&client, &ns, "egress-net").await; + + let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; + let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); + + let policy = rsp.next().await.unwrap().unwrap(); + let meta = policy.metadata.unwrap(); + + let expected_meta = meta::Metadata { + kind: Some(meta::metadata::Kind::Resource(meta::Resource { + group: "policy.linkerd.io".to_string(), + port: 80, + kind: "EgressNetwork".to_string(), + name: "egress-net".to_string(), + namespace: ns.clone(), + section: "".to_string(), + })), + }; + + assert_eq!(meta, expected_meta); + + delete(&client, egress_net).await; + assert!(rsp.next().await.is_none()); + + let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); + + let policy = rsp.next().await.unwrap().unwrap(); + let meta = policy.metadata.unwrap(); + let expected_meta = meta::Metadata { + kind: Some(meta::metadata::Kind::Default("egress-fallback".to_string())), + }; + assert_eq!(meta, expected_meta); + }) + .await; +} + #[tokio::test(flavor = "current_thread")] async fn service_with_no_http_routes() { with_temp_ns(|client, ns| async move { diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs index 4278e15af2451..38d19bc68ec76 100644 --- a/policy-test/tests/outbound_api_linkerd.rs +++ b/policy-test/tests/outbound_api_linkerd.rs @@ -21,7 +21,7 @@ async fn service_does_not_exist() { // Build a service but don't apply it to the 
cluster. let mut svc = mk_service(&ns, "my-svc", 4191); // Give it a bogus cluster ip. - svc.spec.as_mut().unwrap().cluster_ip = Some("1.1.1.1".to_string()); + svc.spec.as_mut().unwrap().cluster_ip = Some("192.168.0.2".to_string()); let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; let rsp = policy_api.watch(&ns, &svc, 4191).await;