From 0cf3b3152ab77cf94bf971cb97b872b0ac2d5c90 Mon Sep 17 00:00:00 2001 From: David O'Riordan Date: Sat, 10 Nov 2018 16:34:54 +0000 Subject: [PATCH 1/5] Update version --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 97c6bc4d..a326fc7a 100644 --- a/README.md +++ b/README.md @@ -119,7 +119,7 @@ To get minikube follow the instructions [here](https://github.com/kubernetes/min You can use the latest release (for Scala 2.11 or 2.12) by adding to your build: ```sbt -libraryDependencies += "io.skuber" %% "skuber" % "2.0.11" +libraryDependencies += "io.skuber" %% "skuber" % "2.0.12" ``` Meanwhile users of skuber v1 can continue to use the latest (and possibly final, with exception of important fixes) v1.x release, which is available only on Scala 2.11: From 0982ac699a9a85e9f317eb7ee1f7ddbb08828c98 Mon Sep 17 00:00:00 2001 From: David O'Riordan Date: Thu, 15 Nov 2018 07:49:21 +0000 Subject: [PATCH 2/5] Refactor for 2.1 release (#242) --- .../src/it/scala/skuber/DeploymentSpec.scala | 1 + client/src/it/scala/skuber/K8SFixture.scala | 3 +- .../scala/skuber/WatchContinuouslySpec.scala | 15 +- .../main/scala/skuber/api/Configuration.scala | 38 + .../scala/skuber/api/client/Cluster.scala | 13 + .../scala/skuber/api/client/Context.scala | 13 + .../skuber/api/client/KubernetesClient.scala | 378 ++++++ .../skuber/api/client/LoggingConfig.scala | 28 + .../main/scala/skuber/api/client/Status.scala | 18 + .../skuber/api/client/exec/PodExecImpl.scala | 127 ++ .../client/impl/KubernetesClientImpl.scala | 766 ++++++++++++ .../scala/skuber/api/client/package.scala | 206 ++++ .../src/main/scala/skuber/api/package.scala | 1094 ----------------- .../{ => watch}/BytesToWatchEventSource.scala | 2 +- .../api/{ => watch}/LongPollingPool.scala | 4 +- .../scala/skuber/api/{ => watch}/Watch.scala | 31 +- .../skuber/api/{ => watch}/WatchSource.scala | 45 +- .../src/main/scala/skuber/json/package.scala | 3 +- 
client/src/main/scala/skuber/package.scala | 54 +- .../api/BytesToWatchEventSourceSpec.scala | 1 + .../skuber/api/LongPollingPoolSpec.scala | 1 + .../scala/skuber/api/WatchSourceSpec.scala | 263 ++-- .../guestbook/KubernetesProxyActor.scala | 2 +- 23 files changed, 1831 insertions(+), 1275 deletions(-) create mode 100644 client/src/main/scala/skuber/api/client/Cluster.scala create mode 100644 client/src/main/scala/skuber/api/client/Context.scala create mode 100644 client/src/main/scala/skuber/api/client/KubernetesClient.scala create mode 100644 client/src/main/scala/skuber/api/client/LoggingConfig.scala create mode 100644 client/src/main/scala/skuber/api/client/Status.scala create mode 100644 client/src/main/scala/skuber/api/client/exec/PodExecImpl.scala create mode 100644 client/src/main/scala/skuber/api/client/impl/KubernetesClientImpl.scala create mode 100644 client/src/main/scala/skuber/api/client/package.scala delete mode 100644 client/src/main/scala/skuber/api/package.scala rename client/src/main/scala/skuber/api/{ => watch}/BytesToWatchEventSource.scala (97%) rename client/src/main/scala/skuber/api/{ => watch}/LongPollingPool.scala (98%) rename client/src/main/scala/skuber/api/{ => watch}/Watch.scala (76%) rename client/src/main/scala/skuber/api/{ => watch}/WatchSource.scala (72%) diff --git a/client/src/it/scala/skuber/DeploymentSpec.scala b/client/src/it/scala/skuber/DeploymentSpec.scala index 37da55ba..5428a47b 100644 --- a/client/src/it/scala/skuber/DeploymentSpec.scala +++ b/client/src/it/scala/skuber/DeploymentSpec.scala @@ -28,6 +28,7 @@ class DeploymentSpec extends K8SFixture with Eventually with Matchers { it should "upgrade the newly created deployment" in { k8s => k8s.get[Deployment](nginxDeploymentName).flatMap { d => + println(s"DEPLOYMENT TO UPDATE ==> $d") val updatedDeployment = d.updateContainer(getNginxContainer("1.9.1")) k8s.update(updatedDeployment).flatMap { _ => eventually(timeout(200 seconds), interval(5 seconds)) { diff --git 
a/client/src/it/scala/skuber/K8SFixture.scala b/client/src/it/scala/skuber/K8SFixture.scala index f58ef40c..a41cc234 100644 --- a/client/src/it/scala/skuber/K8SFixture.scala +++ b/client/src/it/scala/skuber/K8SFixture.scala @@ -5,10 +5,11 @@ import akka.stream.ActorMaterializer import org.scalatest.{FutureOutcome, fixture} import skuber.api.client._ import com.typesafe.config.ConfigFactory +import skuber.api.client.impl.KubernetesClientImpl trait K8SFixture extends fixture.AsyncFlatSpec { - override type FixtureParam = RequestContext + override type FixtureParam = K8SRequestContext implicit val system = ActorSystem() implicit val materializer = ActorMaterializer() diff --git a/client/src/it/scala/skuber/WatchContinuouslySpec.scala b/client/src/it/scala/skuber/WatchContinuouslySpec.scala index 860a69cb..cfa4e597 100644 --- a/client/src/it/scala/skuber/WatchContinuouslySpec.scala +++ b/client/src/it/scala/skuber/WatchContinuouslySpec.scala @@ -8,6 +8,8 @@ import org.scalatest.time.{Seconds, Span} import skuber.apps.v1.{Deployment, DeploymentList} import scala.concurrent.duration._ +import scala.concurrent.Await + import scala.language.postfixOps class WatchContinuouslySpec extends K8SFixture with Eventually with Matchers with ScalaFutures { @@ -32,6 +34,9 @@ class WatchContinuouslySpec extends K8SFixture with Eventually with Matchers wit .run() } + // Wait for watch to be confirmed before performing the actions that create new events to be watched + Await.result(stream, 5.seconds) + //Create first deployment and delete it. 
k8s.create(deploymentOne).futureValue.name shouldBe deploymentOneName eventually { @@ -104,7 +109,15 @@ class WatchContinuouslySpec extends K8SFixture with Eventually with Matchers wit killSwitch._1.shutdown() } - stream.futureValue._2.futureValue.toList.map { d => + val f1 = stream.futureValue + + f1._2.onFailure { + case ex: Exception => ex.printStackTrace() + } + + val f2 = f1._2.futureValue + + f2.toList.map { d => (d._type, d._object.name) } shouldBe List( (EventType.ADDED, deploymentName), diff --git a/client/src/main/scala/skuber/api/Configuration.scala b/client/src/main/scala/skuber/api/Configuration.scala index c2b8370c..a617a705 100644 --- a/client/src/main/scala/skuber/api/Configuration.scala +++ b/client/src/main/scala/skuber/api/Configuration.scala @@ -1,6 +1,7 @@ package skuber.api import java.io.File +import java.net.URL import scala.collection.JavaConverters._ import scala.util.Try @@ -257,4 +258,41 @@ object Configuration { currentContext = ctx ) } + + /* + * Get the current default configuration + */ + def defaultK8sConfig = { + import java.nio.file.Paths + + val skuberUrlOverride = sys.env.get("SKUBER_URL") + skuberUrlOverride match { + case Some(url) => + Configuration.useProxyAt(url) + case None => + val skuberConfigEnv = sys.env.get("SKUBER_CONFIG") + skuberConfigEnv match { + case Some(conf) if conf == "file" => + Configuration.parseKubeconfigFile().get // default kubeconfig location + case Some(conf) if conf == "proxy" => + Configuration.useLocalProxyDefault + case Some(fileUrl) => + val path = Paths.get(new URL(fileUrl).toURI) + Configuration.parseKubeconfigFile(path).get + case None => + // try KUBECONFIG + val kubeConfigEnv = sys.env.get("KUBECONFIG") + kubeConfigEnv.map { kc => + Configuration.parseKubeconfigFile(Paths.get(kc)) + }.getOrElse { + // Try to get config from a running pod + // if that is not set then use default kubeconfig location + Configuration.inClusterConfig.orElse( + Configuration.parseKubeconfigFile() + ) + }.get + } 
+ } + } + } diff --git a/client/src/main/scala/skuber/api/client/Cluster.scala b/client/src/main/scala/skuber/api/client/Cluster.scala new file mode 100644 index 00000000..a56ad187 --- /dev/null +++ b/client/src/main/scala/skuber/api/client/Cluster.scala @@ -0,0 +1,13 @@ +package skuber.api.client + +/** + * @author David O'Riordan + * + * Defines the details needed to communicate with the API server for a Kubernetes cluster + */ +case class Cluster( + apiVersion: String = "v1", + server: String = defaultApiServerURL, + insecureSkipTLSVerify: Boolean = false, + certificateAuthority: Option[PathOrData] = None +) diff --git a/client/src/main/scala/skuber/api/client/Context.scala b/client/src/main/scala/skuber/api/client/Context.scala new file mode 100644 index 00000000..fde41c04 --- /dev/null +++ b/client/src/main/scala/skuber/api/client/Context.scala @@ -0,0 +1,13 @@ +package skuber.api.client + +import skuber.Namespace + +/** + * @author David O'Riordan + * Define the Kubernetes API context for requests + */ +case class Context( + cluster: Cluster = Cluster(), + authInfo: AuthInfo = NoAuth, + namespace: Namespace = Namespace.default +) diff --git a/client/src/main/scala/skuber/api/client/KubernetesClient.scala b/client/src/main/scala/skuber/api/client/KubernetesClient.scala new file mode 100644 index 00000000..af60ffc1 --- /dev/null +++ b/client/src/main/scala/skuber/api/client/KubernetesClient.scala @@ -0,0 +1,378 @@ +package skuber.api.client + +import akka.stream.scaladsl.{Sink, Source} +import akka.util.ByteString +import play.api.libs.json.{Writes,Format} +import skuber.{DeleteOptions, HasStatusSubresource, LabelSelector, ListOptions, ListResource, ObjectResource, Pod, ResourceDefinition, Scale} +import skuber.api.patch.Patch + +import scala.concurrent.{Future, Promise} + +/** + * @author David O'Riordan + * + * This trait defines the skuber Kubernetes client API + * The client API supports making requests on Kubernetes resources that are represented using the 
Skuber case class based data model. + * Generally most methods target a specific namespace that has been configured for the client, but some methods can target other namespaces - + * the descriptions and signatures of those methods should make it clear in those cases, + * Most of the methods are typed to either a specific object resource type O or list resource type L, and require one or more of the + * following implicit parameters, which are normally supplied when you make some standard imports as described in the programming guide: + * - A ResourceDefinition that supplies skuber with key details needed to make the call on the applicable resource type. These are + * defined on the companion object of the case class that implements the resource type so generally you do not have to explicitly + * import them. + * - A Play JSON Format instance that knows how to marshal/unmarshal values of the applicable resource type for sending to and receiving + * from the API server. Many of these are imported from specific json objects (e.g. skuber.json.format), but some are defined on + * the companion object of the applicable resource type. + * - A LoggingContext which provides additional details in the skuber logs for each call - unless overridden this will be a + * skuber.api.client.RequestLoggingContext instance, which just adds a unique request id to the request/response logs. + * + * See the Skuber programming guide and examples for more information on how to use the API. + */ +trait KubernetesClient { + + /** + * Retrieve the object resource with the specified name and type + * @tparam O the specific object resource type e.g.
Pod, Deployment + * @param name the name of the object resource + * @return A future containing the retrieved resource (or an exception if resource not found) + */ + def get[O <: ObjectResource](name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] + + /** + * Retrieve the object resource with the specified name and type, returning None if resource does not exist + * @tparam O the specific object resource type e.g. Pod, Deployment + * @param name the name of the object resource + * @return A future containing Some(resource) if the resource is found on the cluster, or None if not found + */ + def getOption[O <: ObjectResource](name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Option[O]] + + /** + * Retrieve the object resource with the specified name and type from the specified namespace + * @tparam O the specific object resource type e.g. Pod, Deployment + * @param name the name of the object resource + * @param namespace the namespace containing the object resource + * @return A future containing Some(resource) if the resource is found on the cluster otherwise None + */ + def getInNamespace[O <: ObjectResource](name: String, namespace: String)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] + + /** + * Create a new object resource. If the namespace metadata field is set to a non-empty value then the object will be created + * in that namespace, otherwise it will be created in the configured namespace of the client. + * @param obj the resource to create on the cluster + * @tparam O the specific object resource type e.g.
Pod, Deployment + * @return A future containing the created resource returned by Kubernetes + */ + def create[O <: ObjectResource](obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] + + /** + * Update an existing object resource + * @param obj the resource with the desired updates + * @return A future containing the updated resource returned by Kubernetes + */ + def update[O <: ObjectResource](obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] + + /** + * Delete an existing object resource + * @param name the name of the resource to delete + * @param gracePeriodSeconds optional parameter specifying a grace period to be applied before hard killing the resource + * @return A future that will be set to success if the deletion request was accepted by Kubernetes, otherwise failure + */ + def delete[O <: ObjectResource](name: String, gracePeriodSeconds: Int = -1)(implicit rd: ResourceDefinition[O], lc: LoggingContext): Future[Unit] + + /** + * Delete an existing object resource + * @param name the name of the resource to delete + * @param options contains various options that can be passed to the deletion operation, see Kubernetes documentation + * @tparam O the specific object resource type e.g. Pod, Deployment + * @return A future that will be set to success if the deletion request was accepted by Kubernetes, otherwise failure + */ + def deleteWithOptions[O <: ObjectResource](name: String, options: DeleteOptions)(implicit rd: ResourceDefinition[O], lc: LoggingContext): Future[Unit] + + /** + * Delete all resources of specified type in current namespace + * @tparam L list resource type of resources to delete e.g. 
PodList, DeploymentList + * @return A future containing the list of all deleted resources + */ + def deleteAll[L <: ListResource[_]]()(implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] + + /** + * Delete all resources of specified type selected by a specified label selector in current namespace + * @param labelSelector selects the resources to delete + * @tparam L the list resource type of resources to delete e.g. PodList, DeploymentList + * @return A future containing the list of all deleted resources + */ + def deleteAllSelected[L <: ListResource[_]](labelSelector: LabelSelector)(implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] + + /** + * Return a list of the names of all namespaces in the cluster + * @return a future containing the list of names of all namespaces in the cluster + */ + def getNamespaceNames(implicit lc: LoggingContext): Future[List[String]] + + /** + * Get list of all resources across all namespaces in the cluster of a specified list type, grouped by namespace + * @tparam L the list resource type of resources to list e.g. PodList, DeploymentList + * @return A future with a map containing an entry for each namespace, each entry consists of a list of resources keyed by the name of their namespace + */ + def listByNamespace[L <: ListResource[_]]()(implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[Map[String, L]] + + /** + * Get list of resources of a given type in a specified namespace + * @param theNamespace the namespace to search + * @tparam L the list resource type of the objects to retrieve e.g.
PodList, DeploymentList + * @return A future containing the resource list retrieved + */ + def listInNamespace[L <: ListResource[_]](theNamespace: String)(implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] + + /** + * Get list of all resources of specified type in the configured namespace for the client + * @tparam L the list type to retrieve e.g. PodList, DeploymentList + * @return A future containing the resource list retrieved + */ + def list[L <: ListResource[_]]()(implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] + + /** + * Get list of selected resources of specified type in the configured namespace for the client + * @param labelSelector the label selector to use to select the resources to return + * @tparam L the list type of the resources to retrieve e.g. PodList, DeploymentList + * @return A future containing the resource list retrieved + */ + def listSelected[L <: ListResource[_]](labelSelector: LabelSelector)(implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] + + /** + * Get list of resources of specified type, applying the specified options to the list request + * @param options a set of options to be added to the request that can modify how the request is handled by Kubernetes. + * @tparam L the list type of the resources to retrieve e.g. PodList, DeploymentList + * @return A future containing the resource list retrieved + */ + def listWithOptions[L <: ListResource[_]](options: ListOptions)(implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] + + /** + * Update the status subresource of a given object resource. 
Only supported by certain object resource kinds (which need to have defined an + * implicit HasStatusResource) + * This method is generally for advanced use cases such as custom controllers + * @param statusEv this implicit provides evidence that the resource kind has status subresources, so supports this method + * @param obj the name of the object resource whose status subresource is to be updated + * @tparam O The resource type + * @return A future containing the full updated object resource + */ + def updateStatus[O <: ObjectResource](obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O],statusEv: HasStatusSubresource[O], lc: LoggingContext): Future[O] + + /** + * Get the status subresource of a given object resource. Only supported by certain object resource kinds (which need to have defined an + * implicit HasStatusResource) + * This method is generally for advanced use cases such as custom controllers. + * @param name the name of the object resource + * @param statusEv this implicit provides evidence that the resource kind has status subresources, so supports this method + * @tparam O the resource type e.g. Pod, Deployment + * @return A future containing the object resource including current status + */ + def getStatus[O <: ObjectResource](name: String)(implicit fmt: Format[O], rd: ResourceDefinition[O],statusEv: HasStatusSubresource[O], lc: LoggingContext): Future[O] + + /** + * Place a watch on a specific object - this returns a source of events that will be produced whenever the object is added, modified or deleted + * on the cluster + * Note: Most applications should probably use watchContinuously instead, which transparently reconnects and continues the watch in the case of server + * timeouts - the source returned by this method will complete in the presence of such timeouts or other disconnections. + * @param obj the name of the object to watch + * @tparam O the type of the object to watch e.g. 
Pod, Deployment + * @return A future containing an Akka streams Source of WatchEvents that will be emitted + */ + def watch[O <: ObjectResource](obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Source[WatchEvent[O], _]] + + /** + * Place a watch for any changes to a specific object, optionally since a given resource version - this returns a source of events that will be produced + * whenever the object is modified or deleted on the cluster, if the resource version on the updated object is greater than or equal to that specified. + * Note: Most applications should probably use watchContinuously instead, which transparently reconnects and continues the watch in the case of server + * timeouts - the source returned by this method will complete in the presence of such timeouts or other disconnections. + * @param name the name of the object + * @param sinceResourceVersion the resource version - normally the applications gets the current resource from the metadata of a list call on the + * applicable type (e.g. PodList, DeploymentList) and then supplies that to this method. If no resource version is specified, a single ADDED event will + * be produced for an already existing object followed by events for any future changes. + * @param bufSize An optional buffer size for the returned on-the-wire representation of each modified object - normally the default is more than enough. 
+ * @tparam O the type of the resource to watch + * @return A future containing an Akka streams Source of WatchEvents that will be emitted + */ + def watch[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Source[WatchEvent[O], _]] + + /** + * Place a watch on changes to all objects of a specific resource type - this returns a source of events that will be produced whenever an object + * of the specified type is added, modified or deleted on the cluster + * Note: Most applications should probably use watchAllContinuously instead, which transparently reconnects and continues the watch in the case of server + * timeouts - the source returned by this method will complete in the presence of such timeouts or other disconnections. + * + * @param sinceResourceVersion the resource version - normally the applications gets the current resource from the metadata of a list call on the + * applicable type (e.g. PodList, DeploymentList) and then supplies that to this method. If no resource version is specified, a single ADDED event will + * be produced for an already existing object followed by events for any future changes. + * @param bufSize optional buffer size for each modified object received, normally the default is more than enough + * @tparam O the type of resource to watch e.g. Pod, Deployment + * @return A future containing an Akka streams Source of WatchEvents that will be emitted + */ + def watchAll[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Source[WatchEvent[O], _]] + + /** + * Watch a specific object resource continuously. This returns a source that will continue to produce + * events on any updates to the object even if the server times out, by transparently restarting the watch as needed.
+ * @param obj the object resource to watch + * @tparam O the type of the resource e.g Pod + * @return A future containing an Akka streams Source of WatchEvents that will be emitted + */ + def watchContinuously[O <: ObjectResource](obj: O)(implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] + + /** + * Watch a specific object resource continuously. This returns a source that will continue to produce + * events on any updates to the object even if the server times out, by transparently restarting the watch as needed. + * The optional resourceVersion can be used to specify that only events on versions of the object greater than or equal to + * the resource version should be produced. + * + * @param name the name of the resource to watch + * @param sinceResourceVersion the resource version - normally the applications gets the current resource version from the metadata of a list call on the + * applicable type (e.g. PodList, DeploymentList) and then supplies that to this method to receive any future updates. If no resource version is specified, + * a single ADDED event will be produced for an already existing object followed by events for any future changes. + * @param bufSize optional buffer size for received object updates, normally the default is more than enough + * @tparam O the type of the resource + * @return A future containing an Akka streams Source of WatchEvents that will be emitted + */ + def watchContinuously[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] + + /** + * Watch all object resources of a specified type continuously. This returns a source that will continue to produce + * events even if the server times out, by transparently restarting the watch as needed. 
+ * The optional resourceVersion can be used to specify that only events on versions of objects greater than or equal to + * the resource version should be produced. + * + * @param sinceResourceVersion the resource version - normally the applications gets the current resource version from the metadata of a list call on the + * applicable type (e.g. PodList, DeploymentList) and then supplies that to this method to receive any future updates. If no resource version is specified, + * a single ADDED event will be produced for an already existing object followed by events for any future changes. + * @param bufSize optional buffer size for received object updates, normally the default is more than enough + * @tparam O the type of the resource + * @return A future containing an Akka streams Source of WatchEvents that will be emitted + */ + def watchAllContinuously[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] + +/** + * Watch all object resources of a specified type continuously, passing the specified options to the API server with the watch request. + * This returns a source that will continue to produce events even if the server times out, by transparently restarting the watch as needed. + * @param options a set of list options to pass to the server. See https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ListOptions + * for the meaning of the options. Note that the `watch` flag in the options will be ignored / overridden by the client, which + * ensures a watch is always requested on the server.
+ * @param bufsize optional buffer size for received object updates, normally the default is more than enough + * @tparam O the resource type to watch + * @return A future containing an Akka streams Source of WatchEvents that will be emitted + */ + def watchWithOptions[O <: ObjectResource](options: ListOptions, bufsize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] + + /** + * Get the scale subresource of the named object resource + * This can only be called on certain resource types that support scale subresources. + * Normally used in advanced use cases such as custom controllers + * @param objName the name of the resource + * @param sc this implicit parameter provides evidence that the resource type supports scale subresources. Normally defined in the companion + * object of the resource type if applicable so does not need to be imported. + * @tparam O the type of the resource e.g. Pod + * @return a future containing the scale subresource + */ + def getScale[O <: ObjectResource](objName: String)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext) : Future[Scale] + + /** + * Update the scale subresource of a specified resource + * This can only be called on certain resource types that support scale subresources. + * Normally used in advanced use cases such as custom controllers + * + * @param objName the name of the resource + * @param scale the updated scale to set on the resource + * @tparam O the type of the resource + * @param sc this implicit parameter provides evidence that the resource type supports scale subresources. 
Normally defined in the companion + * object of the resource type if applicable so does not need to be imported + * @return a future containing the successfully updated scale subresource + */ + def updateScale[O <: ObjectResource](objName: String, scale: Scale)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext): Future[Scale] + + @deprecated("use getScale followed by updateScale instead") + def scale[O <: ObjectResource](objName: String, count: Int)(implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext): Future[Scale] + + /** + * Patch a resource + * @param name The name of the resource to patch + * @param patchData The patch data to apply to the resource + * @param namespace the namespace (defaults to currently configured namespace) + * @param patchfmt an implicit parameter that knows how to serialise the patch data to Json + * @tparam P the patch type (specifies the patch strategy details) + * @tparam O the type of the resource to be patched + * @return a future containing the patched resource + */ + def patch[P <: Patch, O <: ObjectResource](name: String, patchData: P, namespace: Option[String] = None) + (implicit patchfmt: Writes[P], fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext = RequestLoggingContext()): Future[O] + + /** + * Apply a patch to a specified object resource using json merge patch strategy + * @param obj the name of the resource to patch + * @param patch the patch (in json patch format) + * @tparam O the type of the resource + * @return A future containing the patched resource + */ + @deprecated("use patch instead","v2.1") + def jsonMergePatch[O <: ObjectResource](obj: O, patch: String)(implicit rd: ResourceDefinition[O], fmt: Format[O], lc: LoggingContext): Future[O] + + /** + * Get the logs from a pod (similar to `kubectl logs ...`).
The logs are streamed using an Akka streams source + * @param name the name of the pod + * @param queryParams optional parameters of the request (for example container name) + * @param namespace if set this specifies the namespace of the pod (otherwise the configured namespace is used) + * @return A future containing a Source for the logs stream. + */ + def getPodLogSource(name: String, queryParams: Pod.LogQueryParams, namespace: Option[String] = None)(implicit lc: LoggingContext): Future[Source[ByteString, _]] + + /** + * Execute a command in a pod (similar to `kubectl exec ...`) + * @param podName the name of the pod + * @param command the command to execute + * @param maybeContainerName an optional container name + * @param maybeStdin optional Akka Source for sending input to stdin for the command + * @param maybeStdout optional Akka Sink to receive output from stdout for the command + * @param maybeStderr optional Akka Sink to receive output from stderr for the command + * @param tty optionally set tty on + * @param maybeClose if set, this Promise can be used to terminate the command + * @return A future indicating the exec command has been submitted + */ + def exec( + podName: String, + command: Seq[String], + maybeContainerName: Option[String] = None, + maybeStdin: Option[Source[String, _]] = None, + maybeStdout: Option[Sink[String, _]] = None, + maybeStderr: Option[Sink[String, _]] = None, + tty: Boolean = false, + maybeClose: Option[Promise[Unit]] = None)(implicit lc: LoggingContext): Future[Unit] + + /** + * Return list of API versions supported by the server + * @param lc + * @return a future containing the list of API versions + */ + def getServerAPIVersions(implicit lc: LoggingContext): Future[List[String]]
+ * This is useful for applications that need a lightweight way to target multiple or dynamic namespaces. + * @param newNamespace + * @return + */ + def usingNamespace(newNamespace: String): KubernetesClient + + /** + * Closes the client. Any requests to the client after this is called will be rejected. + */ + def close: Unit + + // Some parameters of the client that it may be useful for some applications to read + val logConfig: LoggingConfig // the logging configuration for client requests + val clusterServer: String // the URL of the target Kubernetes API server + val namespaceName: String // the name of the configured namespace for this client +} diff --git a/client/src/main/scala/skuber/api/client/LoggingConfig.scala b/client/src/main/scala/skuber/api/client/LoggingConfig.scala new file mode 100644 index 00000000..c8860ede --- /dev/null +++ b/client/src/main/scala/skuber/api/client/LoggingConfig.scala @@ -0,0 +1,28 @@ +package skuber.api.client + +/** + * @author David O'Riordan + * + */ + +import LoggingConfig.loggingEnabled + +case class LoggingConfig( + logConfiguration: Boolean=loggingEnabled("config", true), // outputs configuration on initialisation) + logRequestBasic: Boolean=loggingEnabled("request", true), // logs method and URL for request + logRequestBasicMetadata: Boolean=loggingEnabled("request.metadata", false), // logs key resource metadata information if available + logRequestFullObjectResource: Boolean=loggingEnabled("request.object.full", false), // outputs full object resource if available + logResponseBasic: Boolean=loggingEnabled("response", true), // logs basic response info (status code) + logResponseBasicMetadata: Boolean=loggingEnabled("response.metadata", false), // logs some basic metadata from the returned resource, if available + logResponseFullObjectResource: Boolean=loggingEnabled("response.object.full", false), // outputs full received object resource, if available + logResponseListSize: 
Boolean=loggingEnabled("response.list.size", false), // logs size of any returned list resource + logResponseListNames: Boolean=loggingEnabled("response.list.names", false), // logs list of names of items in any returned list resource + logResponseFullListResource: Boolean= loggingEnabled("response.list.full", false) // outputs full contained object resources in list resources +) + +object LoggingConfig { + private def loggingEnabled(logEventType: String, fallback: Boolean) : Boolean= { + sysProps.get(s"skuber.log.$logEventType").map(_ => true).getOrElse(fallback) + } +} + diff --git a/client/src/main/scala/skuber/api/client/Status.scala b/client/src/main/scala/skuber/api/client/Status.scala new file mode 100644 index 00000000..4abec40e --- /dev/null +++ b/client/src/main/scala/skuber/api/client/Status.scala @@ -0,0 +1,18 @@ +package skuber.api.client + +import skuber.ListMeta + +/** + * @author David O'Riordan + * Represents the status information typically returned in error responses from the Kubernetes API + */ +case class Status( + apiVersion: String = "v1", + kind: String = "Status", + metadata: ListMeta = ListMeta(), + status: Option[String] = None, + message: Option[String]= None, + reason: Option[String] = None, + details: Option[Any] = None, + code: Option[Int] = None // HTTP status code +) diff --git a/client/src/main/scala/skuber/api/client/exec/PodExecImpl.scala b/client/src/main/scala/skuber/api/client/exec/PodExecImpl.scala new file mode 100644 index 00000000..9918d1ed --- /dev/null +++ b/client/src/main/scala/skuber/api/client/exec/PodExecImpl.scala @@ -0,0 +1,127 @@ +package skuber.api.client.exec + +import akka.actor.ActorSystem +import akka.http.scaladsl.model.headers.RawHeader +import akka.http.scaladsl.model.{HttpHeader, StatusCodes, Uri, ws} +import akka.http.scaladsl.{ConnectionContext, Http} +import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Partition, Sink, Source} +import akka.stream.{Materializer, SinkShape} +import akka.util.ByteString 
+import akka.{Done, NotUsed} +import skuber.api.client.impl.KubernetesClientImpl +import skuber.api.client.{K8SException, LoggingContext, Status} +import skuber.api.security.HTTPRequestAuth + +import scala.concurrent.duration._ +import scala.concurrent.{ExecutionContext, Future, Promise} + +/** + * Implementation of pod exec support + */ +object PodExecImpl { + + private[client] def exec( + requestContext: KubernetesClientImpl, + podName: String, + command: Seq[String], + maybeContainerName: Option[String] = None, + maybeStdin: Option[Source[String, _]] = None, + maybeStdout: Option[Sink[String, _]] = None, + maybeStderr: Option[Sink[String, _]] = None, + tty: Boolean = false, + maybeClose: Option[Promise[Unit]] = None)(implicit sys: ActorSystem, mat: Materializer, lc : LoggingContext): Future[Unit] = + { + implicit val executor: ExecutionContext = sys.dispatcher + + val containerPrintName = maybeContainerName.getOrElse("") + requestContext.log.info(s"Trying to connect to container ${containerPrintName} of pod ${podName}") + + // Compose queries + var queries: Seq[(String, String)] = Seq( + "stdin" -> maybeStdin.isDefined.toString, + "stdout" -> maybeStdout.isDefined.toString, + "stderr" -> maybeStderr.isDefined.toString, + "tty" -> tty.toString + ) + maybeContainerName.foreach { containerName => + queries ++= Seq("container" -> containerName) + } + queries ++= command.map("command" -> _) + + // Determine scheme and connection context based on SSL context + val (scheme, connectionContext) = requestContext.sslContext match { + case Some(ssl) => + ("wss",ConnectionContext.https(ssl, enabledProtocols = Some(scala.collection.immutable.Seq("TLSv1.2", "TLSv1")))) + case None => + ("ws", Http().defaultClientHttpsContext) + } + + // Compose URI + val uri = Uri(requestContext.clusterServer) + .withScheme(scheme) + .withPath(Uri.Path(s"/api/v1/namespaces/${requestContext.namespaceName}/pods/${podName}/exec")) + .withQuery(Uri.Query(queries: _*)) + + // Compose headers + var 
headers: List[HttpHeader] = List(RawHeader("Accept", "*/*")) + headers ++= HTTPRequestAuth.getAuthHeader(requestContext.requestAuth).map(a => List(a)).getOrElse(List()) + + // Convert `String` to `ByteString`, then prepend channel bytes + val source: Source[ws.Message, Promise[Option[ws.Message]]] = maybeStdin.getOrElse(Source.empty).viaMat(Flow[String].map { s => + ws.BinaryMessage(ByteString(0).concat(ByteString(s))) + })(Keep.right).concatMat(Source.maybe[ws.Message])(Keep.right) + + // Split the sink from websocket into stdout and stderr then remove first bytes which indicate channels + val sink: Sink[ws.Message, NotUsed] = Sink.fromGraph(GraphDSL.create() { implicit builder => + import GraphDSL.Implicits._ + + val partition = builder.add(Partition[ws.Message](2, { + case bm: ws.BinaryMessage.Strict if bm.data(0) == 1 => + 0 + case bm: ws.BinaryMessage.Strict if bm.data(0) == 2 => + 1 + })) + + def convertSink = Flow[ws.Message].map[String] { + case bm: ws.BinaryMessage.Strict => + bm.data.utf8String.substring(1) + } + + partition.out(0) ~> convertSink ~> maybeStdout.getOrElse(Sink.ignore) + partition.out(1) ~> convertSink ~> maybeStderr.getOrElse(Sink.ignore) + + SinkShape(partition.in) + }) + + + // Make a flow from the source to the sink + val flow: Flow[ws.Message, ws.Message, Promise[Option[ws.Message]]] = Flow.fromSinkAndSourceMat(sink, source)(Keep.right) + + // upgradeResponse completes or fails when the connection succeeds or fails + // and promise controls the connection close timing + val (upgradeResponse, promise) = Http().singleWebSocketRequest(ws.WebSocketRequest(uri, headers, subprotocol = Option("channel.k8s.io")), flow, connectionContext) + + val connected = upgradeResponse.map { upgrade => + // just like a regular http request we can access response status which is available via upgrade.response.status + // status code 101 (Switching Protocols) indicates that server support WebSockets + if (upgrade.response.status == 
StatusCodes.SwitchingProtocols) { + Done + } else { + val message = upgrade.response.entity.toStrict(1000.millis).map(_.data.utf8String) + throw new K8SException(Status(message = + Some(s"Connection failed with status ${upgrade.response.status}"), + details = Some(message), code = Some(upgrade.response.status.intValue()))) + } + } + + val close = maybeClose.getOrElse(Promise.successful(())) + connected.foreach { _ => + requestContext.log.info(s"Connected to container ${containerPrintName} of pod ${podName}") + close.future.foreach { _ => + requestContext.log.info(s"Close the connection of container ${containerPrintName} of pod ${podName}") + promise.success(None) + } + } + Future.sequence(Seq(connected, close.future, promise.future)).map { _ => () } + } +} diff --git a/client/src/main/scala/skuber/api/client/impl/KubernetesClientImpl.scala b/client/src/main/scala/skuber/api/client/impl/KubernetesClientImpl.scala new file mode 100644 index 00000000..3bdbd3a6 --- /dev/null +++ b/client/src/main/scala/skuber/api/client/impl/KubernetesClientImpl.scala @@ -0,0 +1,766 @@ +package skuber.api.client.impl + +import akka.actor.ActorSystem +import akka.event.Logging +import akka.http.scaladsl.marshalling.Marshal +import akka.http.scaladsl.model._ +import akka.http.scaladsl.settings.{ClientConnectionSettings, ConnectionPoolSettings} +import akka.http.scaladsl.unmarshalling.Unmarshal +import akka.http.scaladsl.{ConnectionContext, Http, HttpsConnectionContext} +import akka.stream.Materializer +import akka.stream.scaladsl.{Sink, Source} +import akka.util.ByteString +import com.typesafe.config.{Config, ConfigFactory} +import javax.net.ssl.SSLContext +import play.api.libs.json.{Format, Writes, Reads} +import skuber._ +import skuber.api.client.exec.PodExecImpl +import skuber.api.client.{K8SException => _, _} +import skuber.api.security.{HTTPRequestAuth, TLS} +import skuber.api.watch.{LongPollingPool, Watch, WatchSource} +import skuber.json.PlayJsonSupportForAkkaHttp._ +import 
skuber.json.format.apiobj.statusReads +import skuber.json.format.{apiVersionsFormat, deleteOptionsFmt, namespaceListFmt} +import skuber.api.patch._ + +import scala.concurrent.duration._ +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.util.{Failure, Success} + + +/** + * @author David O'Riordan + * This class implements the KubernetesClient API. It uses the Akka HTTP client to handle the requests to + * the Kubernetes API server. + */ +class KubernetesClientImpl private[client] ( + val requestMaker: (Uri, HttpMethod) => HttpRequest, // builds the requests to send + override val clusterServer: String, // the url of the target cluster Kubernetes API server + val requestAuth: AuthInfo, // specifies the authentication (if any) to be added to requests + override val namespaceName: String, // by default requests will target this specified namespace on the cluster + val watchContinuouslyRequestTimeout: Duration, + val watchContinuouslyIdleTimeout: Duration, + val watchPoolIdleTimeout: Duration, + val watchSettings: ConnectionPoolSettings, + val podLogSettings: ConnectionPoolSettings, + val sslContext: Option[SSLContext], // provides the Akka client with the SSL details needed for https connections to the API server + override val logConfig: LoggingConfig, + val closeHook: Option[() => Unit])(implicit val actorSystem: ActorSystem, val materializer: Materializer, val executionContext: ExecutionContext) + extends KubernetesClient +{ + val log = Logging.getLogger(actorSystem, "skuber.api") + + val connectionContext = sslContext + .map { ssl => + ConnectionContext.https(ssl, enabledProtocols = Some(scala.collection.immutable.Seq("TLSv1.2", "TLSv1"))) + } + .getOrElse(Http().defaultClientHttpsContext) + + + private val clusterServerUri = Uri(clusterServer) + + private var isClosed = false + + private[skuber] def invokeWatch(request: HttpRequest)(implicit lc: LoggingContext): Future[HttpResponse] = invoke(request, watchSettings) + private[skuber] def 
invokeLog(request: HttpRequest)(implicit lc: LoggingContext): Future[HttpResponse] = invoke(request, podLogSettings) + private[skuber] def invoke(request: HttpRequest, settings: ConnectionPoolSettings = ConnectionPoolSettings(actorSystem))(implicit lc: LoggingContext): Future[HttpResponse] = { + if (isClosed) { + logError("Attempt was made to invoke request on closed API request context") + throw new IllegalStateException("Request context has been closed") + } + logInfo(logConfig.logRequestBasic, s"about to send HTTP request: ${request.method.value} ${request.uri.toString}") + val responseFut = Http().singleRequest(request, settings = settings, connectionContext = connectionContext) + responseFut onComplete { + case Success(response) => logInfo(logConfig.logResponseBasic,s"received response with HTTP status ${response.status.intValue()}") + case Failure(ex) => logError("HTTP request resulted in an unexpected exception",ex) + } + responseFut + } + + private[skuber] def buildRequest[T <: TypeMeta]( + method: HttpMethod, + rd: ResourceDefinition[_], + nameComponent: Option[String], + query: Option[Uri.Query] = None, + namespace: String = namespaceName): HttpRequest = + { + val nsPathComponent = if (rd.spec.scope == ResourceSpecification.Scope.Namespaced) { + Some("namespaces/" + namespace) + } else { + None + } + + val k8sUrlOptionalParts = List( + clusterServer, + rd.spec.apiPathPrefix, + rd.spec.group, + rd.spec.defaultVersion, + nsPathComponent, + rd.spec.names.plural, + nameComponent) + + val k8sUrlParts = k8sUrlOptionalParts collect { + case p: String if p != "" => p + case Some(p: String) if p != "" => p + } + + val k8sUrlStr = k8sUrlParts.mkString("/") + + val uri = query.map { q => + Uri(k8sUrlStr).withQuery(q) + }.getOrElse { + Uri(k8sUrlStr) + } + + val req = requestMaker(uri, method) + HTTPRequestAuth.addAuth(req, requestAuth) + } + + private[skuber] def logInfo(enabledLogEvent: Boolean, msg: => String)(implicit lc: LoggingContext) = + { + if 
(log.isInfoEnabled && enabledLogEvent) { + log.info(s"[ ${lc.output} - ${msg}]") + } + } + + private[skuber] def logInfoOpt(enabledLogEvent: Boolean, msgOpt: => Option[String])(implicit lc: LoggingContext) = + { + if (log.isInfoEnabled && enabledLogEvent) { + msgOpt foreach { msg => + log.info(s"[ ${lc.output} - ${msg}]") + } + } + } + + private[skuber] def logWarn(msg: String)(implicit lc: LoggingContext) = + { + log.error(s"[ ${lc.output} - $msg ]") + } + + private[skuber] def logError(msg: String)(implicit lc: LoggingContext) = + { + log.error(s"[ ${lc.output} - $msg ]") + } + + private[skuber] def logError(msg: String, ex: Throwable)(implicit lc: LoggingContext) = + { + log.error(ex, s"[ ${lc.output} - $msg ]") + } + + private[skuber] def logDebug(msg : => String)(implicit lc: LoggingContext) = { + if (log.isDebugEnabled) + log.debug(s"[ ${lc.output} - $msg ]") + } + + private[skuber] def logRequestObjectDetails[O <: ObjectResource](method: HttpMethod,resource: O)(implicit lc: LoggingContext) = { + logInfoOpt(logConfig.logRequestBasicMetadata, { + val name = resource.name + val version = resource.metadata.resourceVersion + method match { + case HttpMethods.PUT | HttpMethods.PATCH => Some(s"Requesting update of resource: { name:$name, version:$version ... }") + case HttpMethods.POST => Some(s"Requesting creation of resource: { name: $name ...}") + case _ => None + } + } + ) + logInfo(logConfig.logRequestFullObjectResource, s" Marshal and send: ${resource.toString}") + } + + private[skuber] def logReceivedObjectDetails[O <: ObjectResource](resource: O)(implicit lc: LoggingContext) = + { + logInfo(logConfig.logResponseBasicMetadata, s" resource: { kind:${resource.kind} name:${resource.name} version:${resource.metadata.resourceVersion} ... 
}") + logInfo(logConfig.logResponseFullObjectResource, s" received and parsed: ${resource.toString}") + } + + private[skuber] def logReceivedListDetails[L <: ListResource[_]](result: L)(implicit lc: LoggingContext) = + { + logInfo(logConfig.logResponseBasicMetadata,s"received list resource of kind ${result.kind}") + logInfo(logConfig.logResponseListSize,s"number of items in received list resource: ${result.items.size}") + logInfo(logConfig.logResponseListNames, s"received ${result.kind} contains item(s): ${result.itemNames}]") + logInfo(logConfig.logResponseFullListResource, s" Unmarshalled list resource: ${result.toString}") + } + + private[skuber] def makeRequestReturningObjectResource[O <: ObjectResource](httpRequest: HttpRequest)( + implicit fmt: Format[O], lc: LoggingContext): Future[O] = + { + for { + httpResponse <- invoke(httpRequest) + result <- toKubernetesResponse[O](httpResponse) + _ = logReceivedObjectDetails(result) + } yield result + } + + private[skuber] def makeRequestReturningListResource[L <: ListResource[_]](httpRequest: HttpRequest)( + implicit fmt: Format[L], lc: LoggingContext): Future[L] = + { + for { + httpResponse <- invoke(httpRequest) + result <- toKubernetesResponse[L](httpResponse) + _ = logReceivedListDetails(result) + } yield result + } + + /** + * Modify the specified K8S resource using a given HTTP method. The modified resource is returned. 
+ * The create, update and partiallyUpdate methods all call this, just passing different HTTP methods + */ + private[skuber] def modify[O <: ObjectResource](method: HttpMethod)(obj: O)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] = + { + // if this is a POST we don't include the resource name in the URL + val nameComponent: Option[String] = method match { + case HttpMethods.POST => None + case _ => Some(obj.name) + } + modify(method, obj, nameComponent) + } + + private[skuber] def modify[O <: ObjectResource](method: HttpMethod, obj: O, nameComponent: Option[String])( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] = + { + // Namespace set in the object metadata (if set) has higher priority than that of the + // request context (see Issue #204) + val targetNamespace = if (obj.metadata.namespace.isEmpty) namespaceName else obj.metadata.namespace + + logRequestObjectDetails(method, obj) + val marshal = Marshal(obj) + for { + requestEntity <- marshal.to[RequestEntity] + httpRequest = buildRequest(method, rd, nameComponent, namespace = targetNamespace) + .withEntity(requestEntity.withContentType(MediaTypes.`application/json`)) + newOrUpdatedResource <- makeRequestReturningObjectResource[O](httpRequest) + } yield newOrUpdatedResource + } + + override def create[O <: ObjectResource](obj: O)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] = + { + modify(HttpMethods.POST)(obj) + } + + override def update[O <: ObjectResource](obj: O)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] = + { + modify(HttpMethods.PUT)(obj) + } + + override def updateStatus[O <: ObjectResource](obj: O)(implicit + fmt: Format[O], + rd: ResourceDefinition[O], + statusEv: HasStatusSubresource[O], + lc: LoggingContext): Future[O] = + { + val statusSubresourcePath=s"${obj.name}/status" + modify(HttpMethods.PUT,obj,Some(statusSubresourcePath)) + } + + 
override def getStatus[O <: ObjectResource](name: String)(implicit + fmt: Format[O], + rd: ResourceDefinition[O], + statusEv: HasStatusSubresource[O], + lc: LoggingContext): Future[O] = + { + _get[O](s"${name}/status") + } + + override def getNamespaceNames(implicit lc: LoggingContext): Future[List[String]] = + { + list[NamespaceList].map { namespaceList => + val namespaces = namespaceList.items + namespaces.map(_.name) + } + } + + /* + * List by namespace returns a map of namespace (specified by name e.g. "default", "kube-sys") to the list of objects + * of the specified kind in said namespace. All namespaces in the cluster are included in the map. + * For example, it can be used to get a single list of all objects of the given kind across the whole cluster + * e.g. val allPodsInCluster: Future[List[Pod]] = listByNamespace[Pod] map { _.values.flatMap(_.items) } + * which supports the feature requested in issue #20 + */ + override def listByNamespace[L <: ListResource[_]]()( + implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[Map[String, L]] = + { + listByNamespace[L](rd) + } + + private def listByNamespace[L <: ListResource[_]](rd: ResourceDefinition[_]) + (implicit fmt: Format[L], lc: LoggingContext): Future[Map[String, L]] = + { + val nsNamesFut: Future[List[String]] = getNamespaceNames + val tuplesFut: Future[List[(String, L)]] = nsNamesFut flatMap { nsNames: List[String] => + Future.sequence(nsNames map { (nsName: String) => + listInNamespace[L](nsName, rd) map { l => (nsName, l) } + }) + } + tuplesFut map { + _.toMap[String, L] + } + } + + /* + * List all objects of given kind in the specified namespace on the cluster + */ + override def listInNamespace[L <: ListResource[_]](theNamespace: String)( + implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] = + { + listInNamespace[L](theNamespace, rd) + } + + private def listInNamespace[L <: ListResource[_]](theNamespace: String, rd: 
ResourceDefinition[_])( + implicit fmt: Format[L], lc: LoggingContext): Future[L] = + { + val req = buildRequest(HttpMethods.GET, rd, None, namespace = theNamespace) + makeRequestReturningListResource[L](req) + } + + /* + * List objects of specific resource kind in current namespace + */ + override def list[L <: ListResource[_]]()( + implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] = + { + _list[L](rd, None) + } + + /* + * Retrieve the list of objects of given type in the current namespace that match the supplied label selector + */ + override def listSelected[L <: ListResource[_]](labelSelector: LabelSelector)( + implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] = + { + _list[L](rd, Some(ListOptions(labelSelector=Some(labelSelector)))) + } + + override def listWithOptions[L <: ListResource[_]](options: ListOptions)( + implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] = + { + _list[L](rd, Some(options)) + } + + private def _list[L <: ListResource[_]](rd: ResourceDefinition[_], maybeOptions: Option[ListOptions])( + implicit fmt: Format[L], lc: LoggingContext): Future[L] = + { + val queryOpt = maybeOptions map { opts => + Uri.Query(opts.asMap) + } + if (log.isDebugEnabled) { + val optsInfo = maybeOptions map { opts => s" with options '${opts.asMap.toString}'" } getOrElse "" + logDebug(s"[List request: resources of kind '${rd.spec.names.kind}'${optsInfo}") + } + val req = buildRequest(HttpMethods.GET, rd, None, query = queryOpt) + makeRequestReturningListResource[L](req) + } + + override def getOption[O <: ObjectResource](name: String)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Option[O]] = + { + _get[O](name) map { result => + Some(result) + } recover { + case ex: K8SException if ex.status.code.contains(StatusCodes.NotFound.intValue) => None + } + } + + override def get[O <: ObjectResource](name: String)( + implicit fmt: 
Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] = + { + _get[O](name) + } + + override def getInNamespace[O <: ObjectResource](name: String, namespace: String)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] = + { + _get[O](name, namespace) + } + + private[api] def _get[O <: ObjectResource](name: String, namespace: String = namespaceName)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] = + { + val req = buildRequest(HttpMethods.GET, rd, Some(name), namespace = namespace) + makeRequestReturningObjectResource[O](req) + } + + override def delete[O <: ObjectResource](name: String, gracePeriodSeconds: Int = -1)( + implicit rd: ResourceDefinition[O], lc: LoggingContext): Future[Unit] = + { + val grace=if (gracePeriodSeconds >= 0) Some(gracePeriodSeconds) else None + val options = DeleteOptions(gracePeriodSeconds = grace) + deleteWithOptions[O](name, options) + } + + override def deleteWithOptions[O <: ObjectResource](name: String, options: DeleteOptions)( + implicit rd: ResourceDefinition[O], lc: LoggingContext): Future[Unit] = + { + val marshalledOptions = Marshal(options) + for { + requestEntity <- marshalledOptions.to[RequestEntity] + request = buildRequest(HttpMethods.DELETE, rd, Some(name)) + .withEntity(requestEntity.withContentType(MediaTypes.`application/json`)) + response <- invoke(request) + _ <- checkResponseStatus(response) + _ <- ignoreResponseBody(response) + } yield () + } + + override def deleteAll[L <: ListResource[_]]()( + implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] = + { + _deleteAll[L](rd, None) + } + + override def deleteAllSelected[L <: ListResource[_]](labelSelector: LabelSelector)( + implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext): Future[L] = + { + _deleteAll[L](rd, Some(labelSelector)) + } + + private def _deleteAll[L <: ListResource[_]](rd: ResourceDefinition[_], maybeLabelSelector: 
Option[LabelSelector])( + implicit fmt: Format[L], lc: LoggingContext): Future[L] = + { + val queryOpt = maybeLabelSelector map { ls => + Uri.Query("labelSelector" -> ls.toString) + } + if (log.isDebugEnabled) { + val lsInfo = maybeLabelSelector map { ls => s" with label selector '${ls.toString}'" } getOrElse "" + logDebug(s"[Delete request: resources of kind '${rd.spec.names.kind}'${lsInfo}") + } + val req = buildRequest(HttpMethods.DELETE, rd, None, query = queryOpt) + makeRequestReturningListResource[L](req) + } + + override def getPodLogSource(name: String, queryParams: Pod.LogQueryParams, namespace: Option[String] = None)( + implicit lc: LoggingContext): Future[Source[ByteString, _]] = + { + val targetNamespace=namespace.getOrElse(this.namespaceName) + val queryMap=queryParams.asMap + val query: Option[Uri.Query] = if (queryMap.isEmpty) { + None + } else { + Some(Uri.Query(queryMap)) + } + val nameComponent=s"${name}/log" + val rd = implicitly[ResourceDefinition[Pod]] + val request = buildRequest(HttpMethods.GET, rd, Some(nameComponent), query, targetNamespace) + invokeLog(request).flatMap { response => + val statusOptFut = checkResponseStatus(response) + statusOptFut map { + case Some(status) => + throw new K8SException(status) + case _ => + response.entity.dataBytes + } + } + } + + + // The Watch methods place a Watch on the specified resource on the Kubernetes cluster. + // The methods return Akka streams sources that will reactively emit a stream of updated + // values of the watched resources. + + override def watch[O <: ObjectResource](obj: O)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Source[WatchEvent[O], _]] = + { + watch(obj.name) + } + + // The Watch methods place a Watch on the specified resource on the Kubernetes cluster. + // The methods return Akka streams sources that will reactively emit a stream of updated + // values of the watched resources. 
+ + override def watch[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Source[WatchEvent[O], _]] = + { + Watch.events(this, name, sinceResourceVersion, bufSize) + } + + // watch events on all objects of specified kind in current namespace + override def watchAll[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[Source[WatchEvent[O], _]] = + { + Watch.eventsOnKind[O](this, sinceResourceVersion, bufSize) + } + + override def watchContinuously[O <: ObjectResource](obj: O)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] = + { + watchContinuously(obj.name) + } + + override def watchContinuously[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] = + { + val options=ListOptions(resourceVersion = sinceResourceVersion, timeoutSeconds = Some(watchContinuouslyRequestTimeout.toSeconds) ) + WatchSource(this, buildLongPollingPool(), Some(name), options, bufSize) + } + + override def watchAllContinuously[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] = + { + val options=ListOptions(resourceVersion = sinceResourceVersion, timeoutSeconds = Some(watchContinuouslyRequestTimeout.toSeconds)) + WatchSource(this, buildLongPollingPool(), None, options, bufSize) + } + + override def watchWithOptions[O <: skuber.ObjectResource](options: ListOptions, bufsize: Int = 10000)( + implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Source[WatchEvent[O], _] = + { + WatchSource(this, 
buildLongPollingPool(), None, options, bufsize) + } + + private def buildLongPollingPool[O <: ObjectResource]() = { + LongPollingPool[WatchSource.Start[O]]( + clusterServerUri.scheme, + clusterServerUri.authority.host.address(), + clusterServerUri.effectivePort, + watchPoolIdleTimeout, + sslContext.map(new HttpsConnectionContext(_)), + ClientConnectionSettings(actorSystem.settings.config).withIdleTimeout(watchContinuouslyIdleTimeout) + ) + } + + // Operations on scale subresource + // Scale subresource Only exists for certain resource types like RC, RS, Deployment, StatefulSet so only those types + // define an implicit Scale.SubresourceSpec, which is required to be passed to these methods. + override def getScale[O <: ObjectResource](objName: String)( + implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext) : Future[Scale] = + { + val req = buildRequest(HttpMethods.GET, rd, Some(objName+ "/scale")) + makeRequestReturningObjectResource[Scale](req) + } + + @deprecated("use getScale followed by updateScale instead") + override def scale[O <: ObjectResource](objName: String, count: Int)( + implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext): Future[Scale] = + { + val scale = Scale( + apiVersion = sc.apiVersion, + metadata = ObjectMeta(name = objName, namespace = namespaceName), + spec = Scale.Spec(replicas = count) + ) + updateScale[O](objName, scale) + } + + override def updateScale[O <: ObjectResource](objName: String, scale: Scale)( + implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc:LoggingContext): Future[Scale] = + { + implicit val dispatcher = actorSystem.dispatcher + val marshal = Marshal(scale) + for { + requestEntity <- marshal.to[RequestEntity] + httpRequest = buildRequest(HttpMethods.PUT, rd, Some(s"${objName}/scale")) + .withEntity(requestEntity.withContentType(MediaTypes.`application/json`)) + scaledResource <- makeRequestReturningObjectResource[Scale](httpRequest) + } 
yield scaledResource + } + + override def patch[P <: Patch, O <: ObjectResource](name: String, patchData: P, namespace: Option[String] = None) + (implicit patchfmt: Writes[P], fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext = RequestLoggingContext()): Future[O] = { + val targetNamespace = namespace.getOrElse(namespaceName) + val contentType = patchData.strategy match { + case StrategicMergePatchStrategy => + CustomMediaTypes.`application/strategic-merge-patch+json` + case JsonMergePatchStrategy => + CustomMediaTypes.`application/merge-patch+json` + case JsonPatchStrategy => + MediaTypes.`application/json-patch+json` + } + logInfo(logConfig.logRequestBasicMetadata, s"Requesting patch of resource: { name:$name ... }") + logInfo(logConfig.logRequestFullObjectResource, s" Marshal and send: ${patchData.toString}") + val marshal = Marshal(patchData) + for { + requestEntity <- marshal.to[RequestEntity] + httpRequest = buildRequest(HttpMethods.PATCH, rd, Some(name), namespace = targetNamespace) + .withEntity(requestEntity.withContentType(contentType)) + newOrUpdatedResource <- makeRequestReturningObjectResource[O](httpRequest) + } yield newOrUpdatedResource + } + + /** + * Perform a Json merge patch on a resource + * The patch is passed a String type which should contain the JSON patch formatted per https://tools.ietf.org/html/rfc7386 + * It is a String type instead of a JSON object in order to allow clients to use their own favourite JSON library to create the + * patch, or alternatively to simply manually craft the JSON and insert it into a String. Also patches are generally expected to be + * relatively small, so storing the whole patch in memory should not be problematic. 
+ * It is thus the responsibility of the client to ensure that the `patch` parameter contains a valid JSON merge patch entity for the + * targetted Kubernetes resource `obj` + * @param obj The resource to update with the patch + * @param patch A string containing the JSON patch entity + * @return The patched resource (in a Future) + */ + override def jsonMergePatch[O <: ObjectResource](obj: O, patch: String)( + implicit rd: ResourceDefinition[O], fmt: Format[O], lc:LoggingContext): Future[O] = + { + val patchRequestEntity = HttpEntity.Strict(`application/merge-patch+json`, ByteString(patch)) + val httpRequest = buildRequest(HttpMethods.PATCH, rd, Some(obj.name)).withEntity(patchRequestEntity) + makeRequestReturningObjectResource[O](httpRequest) + } + + // get API versions supported by the cluster + override def getServerAPIVersions(implicit lc: LoggingContext): Future[List[String]] = { + val url = clusterServer + "/api" + val noAuthReq = requestMaker(Uri(url), HttpMethods.GET) + val request = HTTPRequestAuth.addAuth(noAuthReq, requestAuth) + for { + response <- invoke(request) + apiVersionResource <- toKubernetesResponse[APIVersions](response) + } yield apiVersionResource.versions + } + + /* + * Execute a command in a pod + */ + override def exec( + podName: String, + command: Seq[String], + maybeContainerName: Option[String] = None, + maybeStdin: Option[Source[String, _]] = None, + maybeStdout: Option[Sink[String, _]] = None, + maybeStderr: Option[Sink[String, _]] = None, + tty: Boolean = false, + maybeClose: Option[Promise[Unit]] = None)(implicit lc: LoggingContext): Future[Unit] = + { + PodExecImpl.exec(this, podName, command, maybeContainerName, maybeStdin, maybeStdout, maybeStderr, tty, maybeClose) + } + + override def close: Unit = + { + isClosed = true + closeHook foreach { + _ () + } // invoke the specified close hook if specified + } + + /* + * Lightweight switching of namespace for applications that need to access multiple namespaces on same cluster + * 
and using same credentials and other configuration. + */ + override def usingNamespace(newNamespace: String): KubernetesClientImpl = + new KubernetesClientImpl(requestMaker, clusterServer, requestAuth, + newNamespace, watchContinuouslyRequestTimeout, watchContinuouslyIdleTimeout, + watchPoolIdleTimeout, watchSettings, podLogSettings, sslContext, logConfig, closeHook + ) + + private[skuber] def toKubernetesResponse[T](response: HttpResponse)(implicit reader: Reads[T], lc: LoggingContext): Future[T] = + { + val statusOptFut = checkResponseStatus(response) + statusOptFut flatMap { + case Some(status) => + throw new K8SException(status) + case None => + try { + Unmarshal(response).to[T] + } + catch { + case ex: Exception => + logError("Unable to unmarshal resource from response", ex) + throw new K8SException(Status(message = Some("Error unmarshalling resource from response"), details = Some(ex.getMessage))) + } + } + } + + // check for non-OK status, returning (in a Future) some Status object if not ok or otherwise None + private[skuber] def checkResponseStatus(response: HttpResponse)(implicit lc: LoggingContext): Future[Option[Status]] = + { + response.status.intValue match { + case code if code < 300 => + Future.successful(None) + case code => + // a non-success or unexpected status returned - we should normally have a Status in the response body + val statusFut: Future[Status] = Unmarshal(response).to[Status] + statusFut map { status => + if (log.isInfoEnabled) + log.info(s"[Response: non-ok status returned - $status") + Some(status) + } recover { case ex => + if (log.isErrorEnabled) + log.error(s"[Response: could not read Status for non-ok response, exception : ${ex.getMessage}]") + val status: Status = Status( + code = Some(response.status.intValue), + message = Some("Non-ok response and unable to parse Status from response body to get further details"), + details = Some(ex.getMessage) + ) + Some(status) + } + } + } + + /** + * Discards the response + * This is 
for requests (e.g. delete) for which we normally have no interest in the response body, but Akka Http + * requires us to drain it anyway + * (see https://doc.akka.io/docs/akka-http/current/scala/http/implications-of-streaming-http-entity.html) + * @param response the Http Response that we need to drain + * @return A Future[Unit] that will be set to Success or Failure depending on outcome of draining + */ + private def ignoreResponseBody(response: HttpResponse): Future[Unit] = { + response.discardEntityBytes().future.map(done => ()) + } +} + +object KubernetesClientImpl { + + def apply(k8sContext: Context, logConfig: LoggingConfig, closeHook: Option[() => Unit], appConfig: Config) + (implicit actorSystem: ActorSystem, materializer: Materializer): KubernetesClientImpl = + { + appConfig.checkValid(ConfigFactory.defaultReference(), "skuber") + + def getSkuberConfig[T](key: String, fromConfig: String => Option[T], default: T): T = { + val skuberConfigKey = s"skuber.$key" + if (appConfig.getIsNull(skuberConfigKey)) { + default + } else { + fromConfig(skuberConfigKey) match { + case None => default + case Some(t) => t + } + } + } + + def dispatcherFromConfig(configKey: String): Option[ExecutionContext] = if (appConfig.getString(configKey).isEmpty) { + None + } else { + Some(actorSystem.dispatchers.lookup(appConfig.getString(configKey))) + } + + implicit val dispatcher: ExecutionContext = getSkuberConfig("akka.dispatcher", dispatcherFromConfig, actorSystem.dispatcher) + + def durationFomConfig(configKey: String): Option[Duration] = Some(Duration.fromNanos(appConfig.getDuration(configKey).toNanos)) + + val watchIdleTimeout: Duration = getSkuberConfig("watch.idle-timeout", durationFomConfig, Duration.Inf) + val podLogIdleTimeout: Duration = getSkuberConfig("pod-log.idle-timeout", durationFomConfig, Duration.Inf) + + val watchContinuouslyRequestTimeout: Duration = getSkuberConfig("watch-continuously.request-timeout", durationFomConfig, 30.seconds) + val 
watchContinuouslyIdleTimeout: Duration = getSkuberConfig("watch-continuously.idle-timeout", durationFomConfig, 60.seconds) + val watchPoolIdleTimeout: Duration = getSkuberConfig("watch-continuously.pool-idle-timeout", durationFomConfig, 60.seconds) + + //The watch idle timeout needs to be greater than watch api request timeout + require(watchContinuouslyIdleTimeout > watchContinuouslyRequestTimeout) + + if (logConfig.logConfiguration) { + val log = Logging.getLogger(actorSystem, "skuber.api") + log.info("Using following context for connecting to Kubernetes cluster: {}", k8sContext) + } + + val sslContext = TLS.establishSSLContext(k8sContext) + + val theNamespaceName = k8sContext.namespace.name match { + case "" => "default" + case name => name + } + + val requestMaker = (uri: Uri, method: HttpMethod) => HttpRequest(method = method, uri = uri) + + val defaultClientSettings = ConnectionPoolSettings(actorSystem.settings.config) + val watchConnectionSettings = defaultClientSettings.connectionSettings.withIdleTimeout(watchIdleTimeout) + val watchSettings = defaultClientSettings.withConnectionSettings(watchConnectionSettings) + + val podLogConnectionSettings = defaultClientSettings.connectionSettings.withIdleTimeout(podLogIdleTimeout) + val podLogSettings = defaultClientSettings.withConnectionSettings(podLogConnectionSettings) + + new KubernetesClientImpl( + requestMaker, k8sContext.cluster.server, k8sContext.authInfo, + theNamespaceName, watchContinuouslyRequestTimeout, watchContinuouslyIdleTimeout, + watchPoolIdleTimeout, watchSettings, podLogSettings, sslContext, logConfig, closeHook + ) + } +} diff --git a/client/src/main/scala/skuber/api/client/package.scala b/client/src/main/scala/skuber/api/client/package.scala new file mode 100644 index 00000000..9069ed9d --- /dev/null +++ b/client/src/main/scala/skuber/api/client/package.scala @@ -0,0 +1,206 @@ +package skuber.api + +import java.time.Instant +import java.util.UUID + +import akka.NotUsed +import 
akka.actor.ActorSystem +import akka.http.scaladsl.model._ +import akka.stream.Materializer +import akka.stream.scaladsl.Flow +import com.typesafe.config.{Config, ConfigFactory} +import play.api.libs.functional.syntax._ +import play.api.libs.json.Reads._ +import play.api.libs.json._ + +import scala.sys.SystemProperties +import scala.util.Try +import skuber.{LabelSelector, ObjectResource} +import skuber.api.client.impl.KubernetesClientImpl + +/** + * @author David O'Riordan + */ +package object client { + + type Pool[T] = Flow[(HttpRequest, T), (Try[HttpResponse], T), NotUsed] + + final val sysProps = new SystemProperties + + // Certificates and keys can be specified in configuration either as paths to files or embedded PEM data + type PathOrData = Either[String, Array[Byte]] + + // K8S client API classes + final val defaultApiServerURL = "http://localhost:8080" + + // Patch content type(s) + final val `application/merge-patch+json`: MediaType.WithFixedCharset = + MediaType.customWithFixedCharset("application", "merge-patch+json", HttpCharsets.`UTF-8`) + + sealed trait AuthInfo + + sealed trait AccessTokenAuth extends AuthInfo { + def accessToken: String + } + + object NoAuth extends AuthInfo { + override def toString: String = "NoAuth" + } + + final case class BasicAuth(userName: String, password: String) extends AuthInfo { + override def toString: String = s"${getClass.getSimpleName}(userName=$userName,password=)" + } + + final case class TokenAuth(token: String) extends AccessTokenAuth { + + override def accessToken: String = token + + override def toString: String = s"${getClass.getSimpleName}(token=)" + } + + final case class CertAuth(clientCertificate: PathOrData, clientKey: PathOrData, user: Option[String]) extends AuthInfo { + override def toString: String = StringBuilder.newBuilder + .append(getClass.getSimpleName) + .append("(") + .append { + clientCertificate match { + case Left(certPath: String) => "clientCertificate=" + certPath + " " + case Right(_) => 
"clientCertificate= " + } + } + .append { + clientKey match { + case Left(certPath: String) => "clientKey=" + certPath + " " + case Right(_) => "clientKey= " + } + } + .append("userName=") + .append(user.getOrElse("")) + .append(" )") + .mkString + } + + sealed trait AuthProviderAuth extends AccessTokenAuth { + def name: String + } + + // 'jwt' supports an oidc id token per https://kubernetes.io/docs/admin/authentication/#option-1---oidc-authenticator + // - but does not yet support token refresh + final case class OidcAuth(idToken: String) extends AuthProviderAuth { + override val name = "oidc" + + override def accessToken: String = idToken + + override def toString = """OidcAuth(idToken=)""" + } + + final case class GcpAuth private(private val config: GcpConfiguration) extends AuthProviderAuth { + override val name = "gcp" + + @volatile private var refresh: GcpRefresh = new GcpRefresh(config.accessToken, config.expiry) + + def refreshGcpToken(): GcpRefresh = { + val output = config.cmd.execute() + Json.parse(output).as[GcpRefresh] + } + + def accessToken: String = this.synchronized { + if (refresh.expired) + refresh = refreshGcpToken() + + refresh.accessToken + } + + override def toString = + """GcpAuth(accessToken=)""".stripMargin + } + + final private[client] case class GcpRefresh(accessToken: String, expiry: Instant) { + def expired: Boolean = Instant.now.isAfter(expiry.minusSeconds(20)) + } + + private[client] object GcpRefresh { + implicit val gcpRefreshReads: Reads[GcpRefresh] = ( + (JsPath \ "credential" \ "access_token").read[String] and + (JsPath \ "credential" \ "token_expiry").read[Instant] + ) (GcpRefresh.apply _) + } + + final case class GcpConfiguration(accessToken: String, expiry: Instant, cmd: GcpCommand) + + final case class GcpCommand(cmd: String, args: String) { + + import scala.sys.process._ + + def execute(): String = s"$cmd $args".!! 
+ } + + object GcpAuth { + def apply(accessToken: String, expiry: Instant, cmdPath: String, cmdArgs: String): GcpAuth = + new GcpAuth( + GcpConfiguration( + accessToken = accessToken, + expiry = expiry, + GcpCommand(cmdPath, cmdArgs) + ) + ) + } + + // for use with the Watch command + case class WatchEvent[T <: ObjectResource](_type: EventType.Value, _object: T) + + object EventType extends Enumeration { + type EventType = Value + val ADDED, MODIFIED, DELETED, ERROR = Value + } + + trait LoggingContext { + def output: String + } + + object LoggingContext { + implicit def lc : LoggingContext = RequestLoggingContext() + } + + case class RequestLoggingContext(requestId: String) extends LoggingContext { + def output = s"{ reqId=$requestId} }" + } + + object RequestLoggingContext { + def apply(): RequestLoggingContext = new RequestLoggingContext(UUID.randomUUID.toString) + } + + class K8SException(val status: Status) extends RuntimeException(status.toString) // we throw this when we receive a non-OK response + + type RequestContext = KubernetesClient // for backwards compatibility (with pre 2.1 clients) + + def init()(implicit actorSystem: ActorSystem, materializer: Materializer): KubernetesClient = { + init(defaultK8sConfig, defaultAppConfig) + } + + def init(config: Configuration)(implicit actorSystem: ActorSystem, materializer: Materializer): KubernetesClient = { + init(config.currentContext, LoggingConfig(), None, defaultAppConfig) + } + + def init(appConfig: Config)(implicit actorSystem: ActorSystem, materializer: Materializer): KubernetesClient = { + init(defaultK8sConfig.currentContext, LoggingConfig(), None, appConfig) + } + + def init(config: Configuration, appConfig: Config)(implicit actorSystem: ActorSystem, materializer: Materializer): KubernetesClient = { + init(config.currentContext, LoggingConfig(), None, appConfig) + } + + def init(k8sContext: Context, logConfig: LoggingConfig, closeHook: Option[() => Unit] = None) + (implicit actorSystem: ActorSystem, 
materializer: Materializer): KubernetesClient = { + init(k8sContext, logConfig, closeHook, defaultAppConfig) + } + + def init(k8sContext: Context, logConfig: LoggingConfig, closeHook: Option[() => Unit], appConfig: Config) + (implicit actorSystem: ActorSystem, materializer: Materializer): KubernetesClient = { + KubernetesClientImpl(k8sContext, logConfig, closeHook, appConfig) + } + + def defaultK8sConfig: Configuration = Configuration.defaultK8sConfig + + private def defaultAppConfig: Config = ConfigFactory.load() +} diff --git a/client/src/main/scala/skuber/api/package.scala b/client/src/main/scala/skuber/api/package.scala deleted file mode 100644 index 3825bc1b..00000000 --- a/client/src/main/scala/skuber/api/package.scala +++ /dev/null @@ -1,1094 +0,0 @@ -package skuber.api - -import scala.concurrent.{ExecutionContext, Future, Promise} -import scala.sys.SystemProperties -import scala.util.{Failure, Success, Try} -import java.net.URL -import java.time.Instant -import java.util.UUID - -import akka.{Done, NotUsed} -import akka.actor.ActorSystem -import akka.event.Logging -import akka.http.scaladsl.marshalling.Marshal -import akka.http.scaladsl.model._ -import akka.http.scaladsl.model.headers.RawHeader -import akka.http.scaladsl.settings.{ClientConnectionSettings, ConnectionPoolSettings} -import akka.http.scaladsl.unmarshalling.Unmarshal -import akka.http.scaladsl.{ConnectionContext, Http, HttpsConnectionContext} -import akka.stream.{Materializer, SinkShape} -import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Partition, Sink, Source} -import akka.util.ByteString -import com.typesafe.config.{Config, ConfigFactory} -import javax.net.ssl.SSLContext -import play.api.libs.json._ -import play.api.libs.json.Reads._ -import play.api.libs.functional.syntax._ -import skuber.api.security.{HTTPRequestAuth, TLS} -import skuber.json.PlayJsonSupportForAkkaHttp._ -import skuber.json.format._ -import skuber.json.format.apiobj._ -import skuber._ -import skuber.api.WatchSource.Start 
-import skuber.api.patch._ - -import scala.concurrent.duration._ - -/** - * @author David O'Riordan - */ -package object client { - - type Pool[T] = Flow[(HttpRequest, T), (Try[HttpResponse], T), NotUsed] - - final val sysProps = new SystemProperties - - // Certificates and keys can be specified in configuration either as paths to files or embedded PEM data - type PathOrData = Either[String,Array[Byte]] - - // K8S client API classes - final val defaultApiServerURL = "http://localhost:8080" - - // Patch content type(s) - final val `application/merge-patch+json`: MediaType.WithFixedCharset = - MediaType.customWithFixedCharset("application", "merge-patch+json", HttpCharsets.`UTF-8`) - - case class Cluster( - apiVersion: String = "v1", - server: String = defaultApiServerURL, - insecureSkipTLSVerify: Boolean = false, - certificateAuthority: Option[PathOrData] = None - ) - - case class Context( - cluster: Cluster = Cluster(), - authInfo: AuthInfo = NoAuth, - namespace: Namespace = Namespace.default - ) - - sealed trait AuthInfo - - sealed trait AccessTokenAuth extends AuthInfo { - def accessToken: String - } - - object NoAuth extends AuthInfo { - override def toString: String = "NoAuth" - } - - final case class BasicAuth(userName: String, password: String) extends AuthInfo { - override def toString: String = s"${getClass.getSimpleName}(userName=$userName,password=)" - } - - final case class TokenAuth(token: String) extends AccessTokenAuth { - - override def accessToken: String = token - - override def toString: String = s"${getClass.getSimpleName}(token=)" - } - - final case class CertAuth(clientCertificate: PathOrData, clientKey: PathOrData, user: Option[String]) extends AuthInfo { - override def toString: String = StringBuilder.newBuilder - .append(getClass.getSimpleName) - .append("(") - .append { - clientCertificate match { - case Left(certPath: String) => "clientCertificate=" + certPath + " " - case Right(_) => "clientCertificate= " - } - } - .append { - clientKey 
match { - case Left(certPath: String) => "clientKey=" + certPath + " " - case Right(_) => "clientKey= " - } - } - .append("userName=") - .append(user.getOrElse("")) - .append(" )") - .mkString - } - - sealed trait AuthProviderAuth extends AccessTokenAuth { - def name: String - } - - // 'jwt' supports an oidc id token per https://kubernetes.io/docs/admin/authentication/#option-1---oidc-authenticator - // - but does not yet support token refresh - final case class OidcAuth(idToken: String) extends AuthProviderAuth { - override val name = "oidc" - - override def accessToken: String = idToken - - override def toString = """OidcAuth(idToken=)""" - } - - final case class GcpAuth private(private val config: GcpConfiguration) extends AuthProviderAuth { - override val name = "gcp" - - @volatile private var refresh: GcpRefresh = new GcpRefresh(config.accessToken, config.expiry) - - def refreshGcpToken(): GcpRefresh = { - val output = config.cmd.execute() - Json.parse(output).as[GcpRefresh] - } - - def accessToken: String = this.synchronized { - if(refresh.expired) - refresh = refreshGcpToken() - - refresh.accessToken - } - - override def toString = - """GcpAuth(accessToken=)""".stripMargin - } - - final private[client] case class GcpRefresh(accessToken: String, expiry: Instant) { - def expired: Boolean = Instant.now.isAfter(expiry.minusSeconds(20)) - } - - private[client] object GcpRefresh { - implicit val gcpRefreshReads: Reads[GcpRefresh] = ( - (JsPath \ "credential" \ "access_token").read[String] and - (JsPath \ "credential" \ "token_expiry").read[Instant] - )(GcpRefresh.apply _) - } - - final case class GcpConfiguration(accessToken: String, expiry: Instant, cmd: GcpCommand) - - final case class GcpCommand(cmd: String, args: String) { - import scala.sys.process._ - def execute(): String = s"$cmd $args".!! 
- } - - object GcpAuth { - def apply(accessToken: String, expiry: Instant, cmdPath: String, cmdArgs: String): GcpAuth = - new GcpAuth( - GcpConfiguration( - accessToken = accessToken, - expiry = expiry, - GcpCommand(cmdPath, cmdArgs) - ) - ) - } - - // for use with the Watch command - case class WatchEvent[T <: ObjectResource](_type: EventType.Value, _object: T) - object EventType extends Enumeration { - type EventType = Value - val ADDED,MODIFIED,DELETED,ERROR = Value - } - - private def loggingEnabled(logEventType: String, fallback: Boolean) : Boolean= { - sysProps.get(s"skuber.log.$logEventType").map(_ => true).getOrElse(fallback) - } - - // This class offers a fine-grained choice over events to be logged by the API (applicable only if INFO level is enabled) - case class LoggingConfig( - logConfiguration: Boolean=loggingEnabled("config", true), // outputs configuration on initialisation) - logRequestBasic: Boolean=loggingEnabled("request", true), // logs method and URL for request - logRequestBasicMetadata: Boolean=loggingEnabled("request.metadata", false), // logs key resource metadata information if available - logRequestFullObjectResource: Boolean=loggingEnabled("request.object.full", false), // outputs full object resource if available - logResponseBasic: Boolean=loggingEnabled("response", true), // logs basic response info (status code) - logResponseBasicMetadata: Boolean=loggingEnabled("response.metadata", false), // logs some basic metadata from the returned resource, if available - logResponseFullObjectResource: Boolean=loggingEnabled("response.object.full", false), // outputs full received object resource, if available - logResponseListSize: Boolean=loggingEnabled("response.list.size", false), // logs size of any returned list resource - logResponseListNames: Boolean=loggingEnabled("response.list.names", false), // logs list of names of items in any returned list resource - logResponseFullListResource: Boolean= loggingEnabled("response.list.full", 
false) // outputs full contained object resources in list resources - ) - - trait LoggingContext { - def output: String - } - case class RequestLoggingContext(requestId: String) extends LoggingContext { - def output=s"{ reqId=$requestId} }" - } - object RequestLoggingContext { - def apply(): RequestLoggingContext=new RequestLoggingContext(UUID.randomUUID.toString) - } - - class RequestContext(val requestMaker: (Uri, HttpMethod) => HttpRequest, - val clusterServer: String, - val requestAuth: AuthInfo, - val namespaceName: String, - val watchContinuouslyRequestTimeout: Duration, - val watchContinuouslyIdleTimeout: Duration, - val watchPoolIdleTimeout: Duration, - val watchSettings: ConnectionPoolSettings, - val podLogSettings: ConnectionPoolSettings, - val sslContext: Option[SSLContext], - val logConfig: LoggingConfig, - val closeHook: Option[() => Unit]) - (implicit val actorSystem: ActorSystem, val materializer: Materializer, val executionContext: ExecutionContext) { - - val log = Logging.getLogger(actorSystem, "skuber.api") - - val connectionContext = sslContext - .map { ssl => - ConnectionContext.https(ssl, enabledProtocols = Some(scala.collection.immutable.Seq("TLSv1.2", "TLSv1"))) - } - .getOrElse(Http().defaultClientHttpsContext) - - private val clusterServerUri = Uri(clusterServer) - - private var isClosed = false - - private[skuber] def invokeWatch(request: HttpRequest)(implicit lc: LoggingContext): Future[HttpResponse] = invoke(request, watchSettings) - - private[skuber] def invokeLog(request: HttpRequest)(implicit lc: LoggingContext): Future[HttpResponse] = invoke(request, podLogSettings) - - private[skuber] def invoke(request: HttpRequest, settings: ConnectionPoolSettings = ConnectionPoolSettings(actorSystem))(implicit lc: LoggingContext): Future[HttpResponse] = { - if (isClosed) { - logError("Attempt was made to invoke request on closed API request context") - throw new IllegalStateException("Request context has been closed") - } - 
logInfo(logConfig.logRequestBasic, s"about to send HTTP request: ${request.method.value} ${request.uri.toString}") - - val responseFut = Http().singleRequest(request, settings = settings, connectionContext = connectionContext) - responseFut onComplete { - case Success(response) => logInfo(logConfig.logResponseBasic,s"received response with HTTP status ${response.status.intValue()}") - case Failure(ex) => logError("HTTP request resulted in an unexpected exception",ex) - } - responseFut - } - - private[skuber] def buildRequest[T <: TypeMeta]( - method: HttpMethod, - rd: ResourceDefinition[_], - nameComponent: Option[String], - query: Option[Uri.Query] = None, - watch: Boolean = false, - namespace: String = namespaceName): HttpRequest = - { - val nsPathComponent = if (rd.spec.scope == ResourceSpecification.Scope.Namespaced) { - Some("namespaces/" + namespace) - } else { - None - } - - val watchPathComponent = if (watch) Some("watch") else None - - val k8sUrlOptionalParts = List( - clusterServer, - rd.spec.apiPathPrefix, - rd.spec.group, - rd.spec.defaultVersion, - watchPathComponent, - nsPathComponent, - rd.spec.names.plural, - nameComponent) - - val k8sUrlParts = k8sUrlOptionalParts collect { - case p: String if p != "" => p - case Some(p: String) if p != "" => p - } - - val k8sUrlStr = k8sUrlParts.mkString("/") - - val uri = query.map { q => - Uri(k8sUrlStr).withQuery(q) - }.getOrElse { - Uri(k8sUrlStr) - } - - val req = requestMaker(uri, method) - HTTPRequestAuth.addAuth(req, requestAuth) - } - - private[skuber] def logInfo(enabledLogEvent: Boolean, msg: => String)(implicit lc: LoggingContext) = - { - if (log.isInfoEnabled && enabledLogEvent) { - log.info(s"[ ${lc.output} - ${msg}]") - } - } - - private[skuber] def logInfoOpt(enabledLogEvent: Boolean, msgOpt: => Option[String])(implicit lc: LoggingContext) = - { - if (log.isInfoEnabled && enabledLogEvent) { - msgOpt foreach { msg => - log.info(s"[ ${lc.output} - ${msg}]") - } - } - } - - private[skuber] def 
logWarn(msg: String)(implicit lc: LoggingContext) = - { - log.error(s"[ ${lc.output} - $msg ]") - } - - private[skuber] def logError(msg: String)(implicit lc: LoggingContext) = - { - log.error(s"[ ${lc.output} - $msg ]") - } - - private[skuber] def logError(msg: String, ex: Throwable)(implicit lc: LoggingContext) = - { - log.error(ex, s"[ ${lc.output} - $msg ]") - } - - private[skuber] def logDebug(msg : => String)(implicit lc: LoggingContext) = { - if (log.isDebugEnabled) - log.debug(s"[ ${lc.output} - $msg ]") - } - - private[skuber] def logRequestObjectDetails[O <: ObjectResource](method: HttpMethod,resource: O)(implicit lc: LoggingContext) = { - logInfoOpt(logConfig.logRequestBasicMetadata, { - val name = resource.name - val version = resource.metadata.resourceVersion - method match { - case HttpMethods.PUT | HttpMethods.PATCH => Some(s"Requesting update of resource: { name:$name, version:$version ... }") - case HttpMethods.POST => Some(s"Requesting creation of resource: { name: $name ...}") - case _ => None - } - } - ) - logInfo(logConfig.logRequestFullObjectResource, s" Marshal and send: ${resource.toString}") - } - - private[skuber] def logReceivedObjectDetails[O <: ObjectResource](resource: O)(implicit lc: LoggingContext) = - { - logInfo(logConfig.logResponseBasicMetadata, s" resource: { kind:${resource.kind} name:${resource.name} version:${resource.metadata.resourceVersion} ... 
}") - logInfo(logConfig.logResponseFullObjectResource, s" received and parsed: ${resource.toString}") - } - - private[skuber] def logReceivedListDetails[L <: ListResource[_]](result: L)(implicit lc: LoggingContext) = - { - logInfo(logConfig.logResponseBasicMetadata,s"received list resource of kind ${result.kind}") - logInfo(logConfig.logResponseListSize,s"number of items in received list resource: ${result.items.size}") - logInfo(logConfig.logResponseListNames, s"received ${result.kind} contains item(s): ${result.itemNames}]") - logInfo(logConfig.logResponseFullListResource, s" Unmarshalled list resource: ${result.toString}") - } - - private[skuber] def makeRequestReturningObjectResource[O <: ObjectResource](httpRequest: HttpRequest)( - implicit fmt: Format[O], lc: LoggingContext): Future[O] = - { - for { - httpResponse <- invoke(httpRequest) - result <- toKubernetesResponse[O](httpResponse) - _ = logReceivedObjectDetails(result) - } yield result - } - - private[skuber] def makeRequestReturningListResource[L <: ListResource[_]](httpRequest: HttpRequest)( - implicit fmt: Format[L], lc: LoggingContext): Future[L] = - { - for { - httpResponse <- invoke(httpRequest) - result <- toKubernetesResponse[L](httpResponse) - _ = logReceivedListDetails(result) - } yield result - } - - /** - * Modify the specified K8S resource using a given HTTP method. The modified resource is returned. 
- * The create, update and partiallyUpdate methods all call this, just passing different HTTP methods - */ - private[skuber] def modify[O <: ObjectResource](method: HttpMethod)(obj: O)( - implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] = - { - // if this is a POST we don't include the resource name in the URL - val nameComponent: Option[String] = method match { - case HttpMethods.POST => None - case _ => Some(obj.name) - } - modify(method, obj, nameComponent) - } - - private[skuber] def modify[O <: ObjectResource](method: HttpMethod, obj: O, nameComponent: Option[String])( - implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] = - { - // Namespace set in the object metadata (if set) has higher priority than that of the - // request context (see Issue #204) - val targetNamespace = if (obj.metadata.namespace.isEmpty) namespaceName else obj.metadata.namespace - - logRequestObjectDetails(method, obj) - val marshal = Marshal(obj) - for { - requestEntity <- marshal.to[RequestEntity] - httpRequest = buildRequest(method, rd, nameComponent, namespace = targetNamespace) - .withEntity(requestEntity.withContentType(MediaTypes.`application/json`)) - newOrUpdatedResource <- makeRequestReturningObjectResource[O](httpRequest) - } yield newOrUpdatedResource - } - - def create[O <: ObjectResource](obj: O)( - implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext=RequestLoggingContext()): Future[O] = - { - modify(HttpMethods.POST)(obj) - } - - def update[O <: ObjectResource](obj: O)( - implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext=RequestLoggingContext()): Future[O] = - { - modify(HttpMethods.PUT)(obj) - } - - def updateStatus[O <: ObjectResource](obj: O)(implicit - fmt: Format[O], - rd: ResourceDefinition[O], - lc: LoggingContext=RequestLoggingContext(), - statusEv: HasStatusSubresource[O]): Future[O] = - { - val statusSubresourcePath=s"${obj.name}/status" - 
modify(HttpMethods.PUT,obj,Some(statusSubresourcePath)) - } - - def getStatus[O <: ObjectResource](name: String)(implicit - fmt: Format[O], - rd: ResourceDefinition[O], - lc: LoggingContext=RequestLoggingContext(), - statusEv: HasStatusSubresource[O]): Future[O] = - { - _get[O](s"${name}/status") - } - - def getNamespaceNames(implicit lc: LoggingContext=RequestLoggingContext()): Future[List[String]] = - { - list[NamespaceList].map { namespaceList => - val namespaces = namespaceList.items - namespaces.map(_.name) - } - } - - /* - * List by namespace returns a map of namespace (specified by name e.g. "default", "kube-sys") to the list of objects - * of the specified kind in said namespace. All namespaces in the cluster are included in the map. - * For example, it can be used to get a single list of all objects of the given kind across the whole cluster - * e.g. val allPodsInCluster: Future[List[Pod]] = listByNamespace[Pod] map { _.values.flatMap(_.items) } - * which supports the feature requested in issue #20 - */ - def listByNamespace[L <: ListResource[_]]() - (implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext=RequestLoggingContext()): Future[Map[String, L]] = - { - listByNamespace[L](rd) - } - - private def listByNamespace[L <: ListResource[_]](rd: ResourceDefinition[_]) - (implicit fmt: Format[L],lc: LoggingContext): Future[Map[String, L]] = - { - val nsNamesFut: Future[List[String]] = getNamespaceNames - val tuplesFut: Future[List[(String, L)]] = nsNamesFut flatMap { nsNames: List[String] => - Future.sequence(nsNames map { (nsName: String) => - listInNamespace[L](nsName, rd) map { l => (nsName, l) } - }) - } - tuplesFut map { - _.toMap[String, L] - } - } - - /* - * List all objects of given kind in the specified namespace on the cluster - */ - def listInNamespace[L <: ListResource[_]](theNamespace: String) - (implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext=RequestLoggingContext()): Future[L] = - { - 
listInNamespace[L](theNamespace, rd) - } - - private def listInNamespace[L <: ListResource[_]](theNamespace: String, rd: ResourceDefinition[_]) - (implicit fmt: Format[L], lc: LoggingContext): Future[L] = - { - val req = buildRequest(HttpMethods.GET, rd, None, namespace = theNamespace) - makeRequestReturningListResource[L](req) - } - - /* - * List objects of specific resource kind in current namespace - */ - def list[L <: ListResource[_]]()( - implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext=RequestLoggingContext()): Future[L] = - { - _list[L](rd, None) - } - - /* - * Retrieve the list of objects of given type in the current namespace that match the supplied label selector - */ - def listSelected[L <: ListResource[_]](labelSelector: LabelSelector)( - implicit fmt: Format[L], rd: ResourceDefinition[L],lc: LoggingContext=RequestLoggingContext()): Future[L] = - { - _list[L](rd, Some(labelSelector)) - } - - private def _list[L <: ListResource[_]](rd: ResourceDefinition[_], maybeLabelSelector: Option[LabelSelector])( - implicit fmt: Format[L], lc: LoggingContext=RequestLoggingContext()): Future[L] = - { - val queryOpt = maybeLabelSelector map { ls => - Uri.Query("labelSelector" -> ls.toString) - } - if (log.isDebugEnabled) { - val lsInfo = maybeLabelSelector map { ls => s" with label selector '${ls.toString}'" } getOrElse "" - logDebug(s"[List request: resources of kind '${rd.spec.names.kind}'${lsInfo}") - } - val req = buildRequest(HttpMethods.GET, rd, None, query = queryOpt) - makeRequestReturningListResource[L](req) - } - - def getOption[O <: ObjectResource](name: String)( - implicit fmt: Format[O], rd: ResourceDefinition[O],lc: LoggingContext=RequestLoggingContext()): Future[Option[O]] = - { - _get[O](name) map { result => - Some(result) - } recover { - case ex: K8SException if ex.status.code.contains(StatusCodes.NotFound.intValue) => None - } - } - - def get[O <: ObjectResource](name: String)( - implicit fmt: Format[O], rd: 
ResourceDefinition[O],lc: LoggingContext=RequestLoggingContext()): Future[O] = { - _get[O](name) - } - - def getInNamespace[O <: ObjectResource](name: String, namespace: String)( - implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext=RequestLoggingContext()): Future[O] = - { - _get[O](name, namespace) - } - - private[api] def _get[O <: ObjectResource](name: String, namespace: String = namespaceName)( - implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext): Future[O] = - { - val req = buildRequest(HttpMethods.GET, rd, Some(name), namespace = namespace) - makeRequestReturningObjectResource[O](req) - } - - def delete[O <: ObjectResource](name: String, gracePeriodSeconds: Int = -1)( - implicit rd: ResourceDefinition[O], lc: LoggingContext=RequestLoggingContext()): Future[Unit] = - { - val grace=if (gracePeriodSeconds >= 0) Some(gracePeriodSeconds) else None - val options = DeleteOptions(gracePeriodSeconds = grace) - deleteWithOptions[O](name, options) - } - - def deleteWithOptions[O <: ObjectResource](name: String, options: DeleteOptions)( - implicit rd: ResourceDefinition[O], lc: LoggingContext=RequestLoggingContext()): Future[Unit] = - { - val marshalledOptions = Marshal(options) - for { - requestEntity <- marshalledOptions.to[RequestEntity] - request = buildRequest(HttpMethods.DELETE, rd, Some(name)) - .withEntity(requestEntity.withContentType(MediaTypes.`application/json`)) - response <- invoke(request) - _ <- checkResponseStatus(response) - _ <- ignoreResponseBody(response) - } yield () - } - - def deleteAll[L <: ListResource[_]]()( - implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext=RequestLoggingContext()): Future[L] = - { - _deleteAll[L](rd, None) - } - - def deleteAllSelected[L <: ListResource[_]](labelSelector: LabelSelector)( - implicit fmt: Format[L], rd: ResourceDefinition[L], lc: LoggingContext=RequestLoggingContext()): Future[L] = - { - _deleteAll[L](rd, Some(labelSelector)) - } - - private def 
_deleteAll[L <: ListResource[_]](rd: ResourceDefinition[_], maybeLabelSelector: Option[LabelSelector])( - implicit fmt: Format[L], lc: LoggingContext=RequestLoggingContext()): Future[L] = - { - val queryOpt = maybeLabelSelector map { ls => - Uri.Query("labelSelector" -> ls.toString) - } - if (log.isDebugEnabled) { - val lsInfo = maybeLabelSelector map { ls => s" with label selector '${ls.toString}'" } getOrElse "" - logDebug(s"[Delete request: resources of kind '${rd.spec.names.kind}'${lsInfo}") - } - val req = buildRequest(HttpMethods.DELETE, rd, None, query = queryOpt) - makeRequestReturningListResource[L](req) - } - - def getPodLogSource(name: String, queryParams: Pod.LogQueryParams, namespace: Option[String] = None)( - implicit lc: LoggingContext=RequestLoggingContext()): Future[Source[ByteString, _]] = - { - val targetNamespace=namespace.getOrElse(this.namespaceName) - val queryMap=queryParams.asMap - val query: Option[Uri.Query] = if (queryMap.isEmpty) { - None - } else { - Some(Uri.Query(queryMap)) - } - val nameComponent=s"${name}/log" - val rd = implicitly[ResourceDefinition[Pod]] - val request = buildRequest(HttpMethods.GET, rd, Some(nameComponent), query, false, targetNamespace) - invokeLog(request).flatMap { response => - val statusOptFut = checkResponseStatus(response) - statusOptFut map { - case Some(status) => - throw new K8SException(status) - case _ => - response.entity.dataBytes - } - } - } - - def watch[O <: ObjectResource](obj: O)( - implicit fmt: Format[O], rd: ResourceDefinition[O]): Future[Source[WatchEvent[O], _]] = - { - watch(obj.name) - } - - // The Watch methods place a Watch on the specified resource on the Kubernetes cluster. - // The methods return Play Framework enumerators that will reactively emit a stream of updated - // values of the watched resources. 
- def watch[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( - implicit fmt: Format[O], rd: ResourceDefinition[O],lc: LoggingContext=RequestLoggingContext()): Future[Source[WatchEvent[O], _]] = - { - Watch.events(this, name, sinceResourceVersion, bufSize) - } - - // watch events on all objects of specified kind in current namespace - def watchAll[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( - implicit fmt: Format[O], rd: ResourceDefinition[O],lc: LoggingContext=RequestLoggingContext()): Future[Source[WatchEvent[O], _]] = - { - Watch.eventsOnKind[O](this, sinceResourceVersion, bufSize) - } - - def watchContinuously[O <: ObjectResource](obj: O)( - implicit fmt: Format[O], rd: ResourceDefinition[O]): Source[WatchEvent[O], _] = - { - watchContinuously(obj.name) - } - - def watchContinuously[O <: ObjectResource](name: String, sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( - implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext = RequestLoggingContext()): Source[WatchEvent[O], _] = - { - WatchSource(this, - buildLongPollingPool(), - Some(name), sinceResourceVersion, - watchContinuouslyRequestTimeout, bufSize - ) - } - - def watchAllContinuously[O <: ObjectResource](sinceResourceVersion: Option[String] = None, bufSize: Int = 10000)( - implicit fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext = RequestLoggingContext()): Source[WatchEvent[O], _] = - { - WatchSource(this, - buildLongPollingPool(), - None, sinceResourceVersion, - watchContinuouslyRequestTimeout, bufSize - ) - } - - private def buildLongPollingPool[O <: ObjectResource]() = { - LongPollingPool[Start[O]]( - clusterServerUri.scheme, - clusterServerUri.authority.host.address(), - clusterServerUri.effectivePort, - watchPoolIdleTimeout, - sslContext.map(new HttpsConnectionContext(_)), - 
ClientConnectionSettings(actorSystem.settings.config).withIdleTimeout(watchContinuouslyIdleTimeout) - ) - } - - // Operations on scale subresource - // Scale subresource Only exists for certain resource types like RC, RS, Deployment, StatefulSet so only those types - // define an implicit Scale.SubresourceSpec, which is required to be passed to these methods. - def getScale[O <: ObjectResource](objName: String)( - implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc: LoggingContext=RequestLoggingContext()) : Future[Scale] = - { - val req = buildRequest(HttpMethods.GET, rd, Some(objName+ "/scale")) - makeRequestReturningObjectResource[Scale](req) - } - - @deprecated("use getScale followed by updateScale instead") - def scale[O <: ObjectResource](objName: String, count: Int)( - implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc:LoggingContext=RequestLoggingContext()): Future[Scale] = - { - val scale = Scale( - apiVersion = sc.apiVersion, - metadata = ObjectMeta(name = objName, namespace = namespaceName), - spec = Scale.Spec(replicas = count) - ) - updateScale[O](objName, scale) - } - - def updateScale[O <: ObjectResource](objName: String, scale: Scale)( - implicit rd: ResourceDefinition[O], sc: Scale.SubresourceSpec[O], lc:LoggingContext=RequestLoggingContext()): Future[Scale] = - { - implicit val dispatcher = actorSystem.dispatcher - val marshal = Marshal(scale) - for { - requestEntity <- marshal.to[RequestEntity] - httpRequest = buildRequest(HttpMethods.PUT, rd, Some(s"${objName}/scale")) - .withEntity(requestEntity.withContentType(MediaTypes.`application/json`)) - scaledResource <- makeRequestReturningObjectResource[Scale](httpRequest) - } yield scaledResource - } - - /** - * Perform a Json merge patch on a resource - * The patch is passed a String type which should contain the JSON patch formatted per https://tools.ietf.org/html/rfc7386 - * It is a String type instead of a JSON object in order to allow clients to use their own 
favourite JSON library to create the - * patch, or alternatively to simply manually craft the JSON and insert it into a String. Also patches are generally expected to be - * relatively small, so storing the whole patch in memory should not be problematic. - * It is thus the responsibility of the client to ensure that the `patch` parameter contains a valid JSON merge patch entity for the - * targetted Kubernetes resource `obj` - * @param obj The resource to update with the patch - * @param patch A string containing the JSON patch entity - * @return The patched resource (in a Future) - */ - def jsonMergePatch[O <: ObjectResource](obj: O, patch: String)( - implicit rd: ResourceDefinition[O], fmt: Format[O], lc:LoggingContext=RequestLoggingContext()): Future[O] = - { - val patchRequestEntity = HttpEntity.Strict(`application/merge-patch+json`, ByteString(patch)) - val httpRequest = buildRequest(HttpMethods.PATCH, rd, Some(obj.name)).withEntity(patchRequestEntity) - makeRequestReturningObjectResource[O](httpRequest) - } - - def patch[P <: Patch, O <: ObjectResource](name: String, patchData: P, namespace: Option[String] = None) - (implicit patchfmt: Writes[P], fmt: Format[O], rd: ResourceDefinition[O], lc: LoggingContext = RequestLoggingContext()): Future[O] = { - val targetNamespace = namespace.getOrElse(namespaceName) - - val contentType = patchData.strategy match { - case StrategicMergePatchStrategy => - CustomMediaTypes.`application/strategic-merge-patch+json` - case JsonMergePatchStrategy => - CustomMediaTypes.`application/merge-patch+json` - case JsonPatchStrategy => - MediaTypes.`application/json-patch+json` - } - - logInfo(logConfig.logRequestBasicMetadata, s"Requesting patch of resource: { name:$name ... 
}") - logInfo(logConfig.logRequestFullObjectResource, s" Marshal and send: ${patchData.toString}") - - val marshal = Marshal(patchData) - for { - requestEntity <- marshal.to[RequestEntity] - httpRequest = buildRequest(HttpMethods.PATCH, rd, Some(name), namespace = targetNamespace) - .withEntity(requestEntity.withContentType(contentType)) - newOrUpdatedResource <- makeRequestReturningObjectResource[O](httpRequest) - } yield newOrUpdatedResource - } - - // get API versions supported by the cluster - def getServerAPIVersions(implicit lc: LoggingContext=RequestLoggingContext()): Future[List[String]] = { - val url = clusterServer + "/api" - val noAuthReq = requestMaker(Uri(url), HttpMethods.GET) - val request = HTTPRequestAuth.addAuth(noAuthReq, requestAuth) - for { - response <- invoke(request) - apiVersionResource <- toKubernetesResponse[APIVersions](response) - } yield apiVersionResource.versions - } - - def exec(podName: String, command: Seq[String], maybeContainerName: Option[String] = None, - maybeStdin: Option[Source[String, _]] = None, - maybeStdout: Option[Sink[String, _]] = None, - maybeStderr: Option[Sink[String, _]] = None, - tty: Boolean = false, - maybeClose: Option[Promise[Unit]] = None): Future[Unit] = { - val containerPrintName = maybeContainerName.getOrElse("") - log.info(s"Trying to connect to container ${containerPrintName} of pod ${podName}") - - // Compose queries - var queries: Seq[(String, String)] = Seq( - "stdin" -> maybeStdin.isDefined.toString, - "stdout" -> maybeStdout.isDefined.toString, - "stderr" -> maybeStderr.isDefined.toString, - "tty" -> tty.toString - ) - maybeContainerName.foreach { containerName => - queries ++= Seq("container" -> containerName) - } - queries ++= command.map("command" -> _) - - // Determine scheme and connection context based on SSL context - val (scheme, connectionContext) = sslContext match { - case Some(ssl) => - ("wss",ConnectionContext.https(ssl, enabledProtocols = 
Some(scala.collection.immutable.Seq("TLSv1.2", "TLSv1")))) - case None => - ("ws", Http().defaultClientHttpsContext) - } - - // Compose URI - val uri = Uri(clusterServer) - .withScheme(scheme) - .withPath(Uri.Path(s"/api/v1/namespaces/$namespaceName/pods/${podName}/exec")) - .withQuery(Uri.Query(queries: _*)) - - // Compose headers - var headers: List[HttpHeader] = List(RawHeader("Accept", "*/*")) - headers ++= HTTPRequestAuth.getAuthHeader(requestAuth).map(a => List(a)).getOrElse(List()) - - // Convert `String` to `ByteString`, then prepend channel bytes - val source: Source[ws.Message, Promise[Option[ws.Message]]] = maybeStdin.getOrElse(Source.empty).viaMat(Flow[String].map { s => - ws.BinaryMessage(ByteString(0).concat(ByteString(s))) - })(Keep.right).concatMat(Source.maybe[ws.Message])(Keep.right) - - // Split the sink from websocket into stdout and stderr then remove first bytes which indicate channels - val sink: Sink[ws.Message, NotUsed] = Sink.fromGraph(GraphDSL.create() { implicit builder => - import GraphDSL.Implicits._ - - val partition = builder.add(Partition[ws.Message](2, { - case bm: ws.BinaryMessage.Strict if bm.data(0) == 1 => - 0 - case bm: ws.BinaryMessage.Strict if bm.data(0) == 2 => - 1 - })) - - def convertSink = Flow[ws.Message].map[String] { - case bm: ws.BinaryMessage.Strict => - bm.data.utf8String.substring(1) - } - - partition.out(0) ~> convertSink ~> maybeStdout.getOrElse(Sink.ignore) - partition.out(1) ~> convertSink ~> maybeStderr.getOrElse(Sink.ignore) - - SinkShape(partition.in) - }) - - - // Make a flow from the source to the sink - val flow: Flow[ws.Message, ws.Message, Promise[Option[ws.Message]]] = Flow.fromSinkAndSourceMat(sink, source)(Keep.right) - - // upgradeResponse completes or fails when the connection succeeds or fails - // and promise controls the connection close timing - val (upgradeResponse, promise) = Http().singleWebSocketRequest(ws.WebSocketRequest(uri, headers, subprotocol = Option("channel.k8s.io")), flow, 
connectionContext) - - val connected = upgradeResponse.map { upgrade => - // just like a regular http request we can access response status which is available via upgrade.response.status - // status code 101 (Switching Protocols) indicates that server support WebSockets - if (upgrade.response.status == StatusCodes.SwitchingProtocols) { - Done - } else { - val message = upgrade.response.entity.toStrict(1000.millis).map(_.data.utf8String) - throw new K8SException(Status(message = - Some(s"Connection failed with status ${upgrade.response.status}"), - details = Some(message), code = Some(upgrade.response.status.intValue()))) - } - } - - val close = maybeClose.getOrElse(Promise.successful(())) - connected.foreach { _ => - log.info(s"Connected to container ${containerPrintName} of pod ${podName}") - close.future.foreach { _ => - log.info(s"Close the connection of container ${containerPrintName} of pod ${podName}") - promise.success(None) - } - } - Future.sequence(Seq(connected, close.future, promise.future)).map { _ => () } - } - - def close: Unit = - { - isClosed = true - closeHook foreach { - _ () - } // invoke the specified close hook if specified - } - - /* - * Lightweight switching of namespace for applications that need to access multiple namespaces on same cluster - * and using same credentials and other configuration. 
- */ - def usingNamespace(newNamespace: String): RequestContext = - new RequestContext(requestMaker, clusterServer, requestAuth, - newNamespace, watchContinuouslyRequestTimeout, watchContinuouslyIdleTimeout, - watchPoolIdleTimeout, watchSettings, podLogSettings, sslContext, logConfig, closeHook - ) - - private[skuber] def toKubernetesResponse[T](response: HttpResponse)(implicit reader: Reads[T], lc: LoggingContext): Future[T] = - { - val statusOptFut = checkResponseStatus(response) - statusOptFut flatMap { - case Some(status) => - throw new K8SException(status) - case None => - try { - Unmarshal(response).to[T] - } - catch { - case ex: Exception => - logError("Unable to unmarshal resource from response", ex) - throw new K8SException(Status(message = Some("Error unmarshalling resource from response"), details = Some(ex.getMessage))) - } - } - } - - // check for non-OK status, returning (in a Future) some Status object if not ok or otherwise None - private[skuber] def checkResponseStatus(response: HttpResponse)(implicit lc: LoggingContext): Future[Option[Status]] = - { - response.status.intValue match { - case code if code < 300 => - Future.successful(None) - case code => - // a non-success or unexpected status returned - we should normally have a Status in the response body - val statusFut: Future[Status] = Unmarshal(response).to[Status] - statusFut map { status => - if (log.isInfoEnabled) - log.info(s"[Response: non-ok status returned - $status") - Some(status) - } recover { case ex => - if (log.isErrorEnabled) - log.error(s"[Response: could not read Status for non-ok response, exception : ${ex.getMessage}]") - Some(Status( - code = Some(response.status.intValue), - message = Some("Non-ok response and unable to parse Status from response body to get further details"), - details = Some(ex.getMessage) - )) - } - } - } - - /** - * Discards the response - * This is for requests (e.g. 
delete) for which we normally have no interest in the response body, but Akka Http - * requires us to drain it anyway - * (see https://doc.akka.io/docs/akka-http/current/scala/http/implications-of-streaming-http-entity.html) - * @param response the Http Response that we need to drain - * @return A Future[Unit] that will be set to Success or Failure depending on outcome of draining - */ - private def ignoreResponseBody(response: HttpResponse): Future[Unit] = { - response.discardEntityBytes().future.map(done => ()) - } - } - - // Status will usually be returned by Kubernetes when an error occurs with a request - case class Status( - apiVersion: String = "v1", - kind: String = "Status", - metadata: ListMeta = ListMeta(), - status: Option[String] = None, - message: Option[String]= None, - reason: Option[String] = None, - details: Option[Any] = None, - code: Option[Int] = None // HTTP status code - ) - - class K8SException(val status: Status) extends RuntimeException (status.toString) // we throw this when we receive a non-OK response - - def init()(implicit actorSystem: ActorSystem, materializer: Materializer): RequestContext = { - // Initialising without explicit Kubernetes Configuration. If SKUBER_URL environment variable is set then it - // is assumed to point to a kubectl proxy running at the URL specified so we configure to use that, otherwise if not set - // then we try to configure from a kubeconfig file located as follows: - // - // If SKUBER_CONFIG is set, then use the path specified e.g. 
- // `SKUBER_CONFIG=file:///etc/kubeconfig` will load config from the file `/etc/kubeconfig` - // Note: `SKUBER_CONFIG=file` is a special case: it will load config from the default kubeconfig path `$HOME/.kube/config` - // Note: `SKUBER_CONFIG=proxy` is also special: it configures skuber to connect via localhost:8080 - // If SKUBER_CONFIG is also not set, then we next fallback to the standard Kubernetes environment variable KUBECONFIG - // If neither of these is set, then finally it tries the standard kubeconfig file location `$HOME/.kube/config` before - // giving up and throwing an exception - // - init(defaultK8sConfig, defaultAppConfig) - } - - def init(config: Configuration)(implicit actorSystem: ActorSystem, materializer: Materializer): RequestContext = { - init(config.currentContext, LoggingConfig(), None, defaultAppConfig) - } - - def init(appConfig: Config)(implicit actorSystem: ActorSystem, materializer: Materializer): RequestContext = { - init(defaultK8sConfig.currentContext, LoggingConfig(), None, appConfig) - } - - def init(config: Configuration, appConfig: Config)(implicit actorSystem: ActorSystem, materializer: Materializer): RequestContext = { - init(config.currentContext, LoggingConfig(), None, appConfig) - } - - def init(k8sContext: Context, logConfig: LoggingConfig, closeHook: Option[() => Unit] = None) - (implicit actorSystem: ActorSystem, materializer: Materializer): RequestContext = { - init(k8sContext, logConfig, closeHook, defaultAppConfig) - } - - def init(k8sContext: Context, logConfig: LoggingConfig, closeHook: Option[() => Unit], appConfig: Config) - (implicit actorSystem: ActorSystem, materializer: Materializer): RequestContext = - { - appConfig.checkValid(ConfigFactory.defaultReference(), "skuber") - - def getSkuberConfig[T](key: String, fromConfig: String => Option[T], default: T): T = { - val skuberConfigKey = s"skuber.$key" - if (appConfig.getIsNull(skuberConfigKey)) { - default - } else { - fromConfig(skuberConfigKey) match { - 
case None => default - case Some(t) => t - } - } - } - - def dispatcherFromConfig(configKey: String): Option[ExecutionContext] = if (appConfig.getString(configKey).isEmpty) { - None - } else { - Some(actorSystem.dispatchers.lookup(appConfig.getString(configKey))) - } - implicit val dispatcher: ExecutionContext = getSkuberConfig("akka.dispatcher", dispatcherFromConfig, actorSystem.dispatcher) - - def durationFomConfig(configKey: String): Option[Duration] = Some(Duration.fromNanos(appConfig.getDuration(configKey).toNanos)) - val watchIdleTimeout: Duration = getSkuberConfig("watch.idle-timeout", durationFomConfig, Duration.Inf) - val podLogIdleTimeout: Duration = getSkuberConfig("pod-log.idle-timeout", durationFomConfig, Duration.Inf) - - val watchContinuouslyRequestTimeout: Duration = getSkuberConfig("watch-continuously.request-timeout", durationFomConfig, 30.seconds) - val watchContinuouslyIdleTimeout: Duration = getSkuberConfig("watch-continuously.idle-timeout", durationFomConfig, 60.seconds) - val watchPoolIdleTimeout: Duration = getSkuberConfig("watch-continuously.pool-idle-timeout", durationFomConfig, 60.seconds) - - //The watch idle timeout needs to be greater than watch api request timeout - require(watchContinuouslyIdleTimeout > watchContinuouslyRequestTimeout) - - if (logConfig.logConfiguration) { - val log = Logging.getLogger(actorSystem, "skuber.api") - log.info("Using following context for connecting to Kubernetes cluster: {}", k8sContext) - } - - val sslContext = TLS.establishSSLContext(k8sContext) - - val theNamespaceName = k8sContext.namespace.name match { - case "" => "default" - case name => name - } - - val requestMaker = (uri: Uri, method: HttpMethod) => HttpRequest(method = method, uri = uri) - - val defaultClientSettings = ConnectionPoolSettings(actorSystem.settings.config) - val watchConnectionSettings = defaultClientSettings.connectionSettings.withIdleTimeout(watchIdleTimeout) - val watchSettings = 
defaultClientSettings.withConnectionSettings(watchConnectionSettings) - val podLogConnectionSettings = defaultClientSettings.connectionSettings.withIdleTimeout(podLogIdleTimeout) - val podLogSettings = defaultClientSettings.withConnectionSettings(podLogConnectionSettings) - - new RequestContext( - requestMaker, k8sContext.cluster.server, k8sContext.authInfo, - theNamespaceName, watchContinuouslyRequestTimeout, watchContinuouslyIdleTimeout, - watchPoolIdleTimeout, watchSettings, podLogSettings, sslContext, logConfig, closeHook - ) - } - - def defaultK8sConfig: Configuration = { - import java.nio.file.Paths - - val skuberUrlOverride = sys.env.get("SKUBER_URL") - skuberUrlOverride match { - case Some(url) => - Configuration.useProxyAt(url) - case None => - val skuberConfigEnv = sys.env.get("SKUBER_CONFIG") - skuberConfigEnv match { - case Some(conf) if conf == "file" => - Configuration.parseKubeconfigFile().get // default kubeconfig location - case Some(conf) if conf == "proxy" => - Configuration.useLocalProxyDefault - case Some(fileUrl) => - val path = Paths.get(new URL(fileUrl).toURI) - Configuration.parseKubeconfigFile(path).get - case None => - // try KUBECONFIG - val kubeConfigEnv = sys.env.get("KUBECONFIG") - kubeConfigEnv.map { kc => - Configuration.parseKubeconfigFile(Paths.get(kc)) - }.getOrElse { - // Try to get config from a running pod - // if that is not set then use default kubeconfig location - Configuration.inClusterConfig.orElse( - Configuration.parseKubeconfigFile() - ) - }.get - } - } - } - - private def defaultAppConfig: Config = ConfigFactory.load() -} diff --git a/client/src/main/scala/skuber/api/BytesToWatchEventSource.scala b/client/src/main/scala/skuber/api/watch/BytesToWatchEventSource.scala similarity index 97% rename from client/src/main/scala/skuber/api/BytesToWatchEventSource.scala rename to client/src/main/scala/skuber/api/watch/BytesToWatchEventSource.scala index f713c346..1c0fed5b 100644 --- 
a/client/src/main/scala/skuber/api/BytesToWatchEventSource.scala +++ b/client/src/main/scala/skuber/api/watch/BytesToWatchEventSource.scala @@ -1,4 +1,4 @@ -package skuber.api +package skuber.api.watch import akka.stream.scaladsl.{JsonFraming, Source} import akka.util.ByteString diff --git a/client/src/main/scala/skuber/api/LongPollingPool.scala b/client/src/main/scala/skuber/api/watch/LongPollingPool.scala similarity index 98% rename from client/src/main/scala/skuber/api/LongPollingPool.scala rename to client/src/main/scala/skuber/api/watch/LongPollingPool.scala index d963d077..13a1ec2b 100644 --- a/client/src/main/scala/skuber/api/LongPollingPool.scala +++ b/client/src/main/scala/skuber/api/watch/LongPollingPool.scala @@ -1,9 +1,9 @@ -package skuber.api +package skuber.api.watch import akka.NotUsed import akka.actor.ActorSystem -import akka.http.scaladsl.{Http, HttpsConnectionContext} import akka.http.scaladsl.settings.{ClientConnectionSettings, ConnectionPoolSettings} +import akka.http.scaladsl.{Http, HttpsConnectionContext} import akka.stream.Materializer import skuber.api.client.Pool diff --git a/client/src/main/scala/skuber/api/Watch.scala b/client/src/main/scala/skuber/api/watch/Watch.scala similarity index 76% rename from client/src/main/scala/skuber/api/Watch.scala rename to client/src/main/scala/skuber/api/watch/Watch.scala index 732aa853..7a5c7d13 100644 --- a/client/src/main/scala/skuber/api/Watch.scala +++ b/client/src/main/scala/skuber/api/watch/Watch.scala @@ -1,12 +1,14 @@ -package skuber.api +package skuber.api.watch -import scala.concurrent.{ExecutionContext, Future} -import scala.language.postfixOps import akka.http.scaladsl.model.{HttpMethods, _} import akka.stream.scaladsl.Source import play.api.libs.json.Format import skuber.api.client._ -import skuber.{ObjectResource, ResourceDefinition} +import skuber.api.client.impl.KubernetesClientImpl +import skuber.{ListOptions, ObjectResource, ResourceDefinition} + +import 
scala.concurrent.{ExecutionContext, Future} +import scala.language.postfixOps /** * @author David O'Riordan @@ -14,7 +16,7 @@ import skuber.{ObjectResource, ResourceDefinition} * Based on Akka streaming */ object Watch { - + /** * Get a source of events on a specific Kubernetes resource * @param context the applicable request context @@ -26,13 +28,18 @@ object Watch { * @tparam O * @return */ - def events[O <: ObjectResource](context: RequestContext, name: String, sinceResourceVersion: Option[String] = None, bufSize: Int)( + def events[O <: ObjectResource](context: KubernetesClientImpl, name: String, sinceResourceVersion: Option[String] = None, bufSize: Int)( implicit format: Format[O], rd: ResourceDefinition[O], lc: LoggingContext) : Future[Source[WatchEvent[O], _]] = { context.logInfo(context.logConfig.logRequestBasic, s"creating watch on resource $name of kind ${rd.spec.names.kind}") - val maybeResourceVersionQuery = sinceResourceVersion map { version => Uri.Query("resourceVersion" -> version) } - val request = context.buildRequest(HttpMethods.GET, rd, Some(name), query = maybeResourceVersionQuery, watch = true) + val nameFieldSelector=Some(s"metadata.name=$name") + val watchOptions=ListOptions( + resourceVersion = sinceResourceVersion, + watch = Some(true), + fieldSelector = nameFieldSelector + ) + val request = context.buildRequest(HttpMethods.GET, rd, None, query = Some(Uri.Query(watchOptions.asMap))) val responseFut = context.invokeWatch(request) toFutureWatchEventSource(context, responseFut, bufSize) } @@ -47,13 +54,13 @@ object Watch { * @tparam O * @return a Future which will eventually return a Source of events */ - def eventsOnKind[O <: ObjectResource](context: RequestContext, sinceResourceVersion: Option[String] = None, bufSize: Int)( + def eventsOnKind[O <: ObjectResource](context: KubernetesClientImpl, sinceResourceVersion: Option[String] = None, bufSize: Int)( implicit format: Format[O], rd: ResourceDefinition[O], lc: LoggingContext) : 
Future[Source[WatchEvent[O], _]] = { context.logInfo(context.logConfig.logRequestBasic, s"creating skuber watch on kind ${rd.spec.names.kind}") - val maybeResourceVersionQuery = sinceResourceVersion map { v => Uri.Query("resourceVersion" -> v) } - val request = context.buildRequest(HttpMethods.GET, rd, None, query = maybeResourceVersionQuery, watch = true) + val watchOptions=ListOptions(resourceVersion = sinceResourceVersion, watch = Some(true)) + val request = context.buildRequest(HttpMethods.GET, rd, None, query = Some(Uri.Query(watchOptions.asMap))) val responseFut = context.invokeWatch(request) toFutureWatchEventSource(context, responseFut, bufSize) } @@ -67,7 +74,7 @@ object Watch { * @tparam O the Kubernetes kind of each events object * @return a Future which will eventually return a Source of events */ - private def toFutureWatchEventSource[O <: ObjectResource](context: RequestContext, eventStreamResponseFut: Future[HttpResponse], bufSize: Int)( + private def toFutureWatchEventSource[O <: ObjectResource](context: KubernetesClientImpl, eventStreamResponseFut: Future[HttpResponse], bufSize: Int)( implicit format: Format[O],lc: LoggingContext): Future[Source[WatchEvent[O], _]] = { implicit val ec: ExecutionContext = context.actorSystem.dispatcher diff --git a/client/src/main/scala/skuber/api/WatchSource.scala b/client/src/main/scala/skuber/api/watch/WatchSource.scala similarity index 72% rename from client/src/main/scala/skuber/api/WatchSource.scala rename to client/src/main/scala/skuber/api/watch/WatchSource.scala index 7580e313..6b26f4c5 100644 --- a/client/src/main/scala/skuber/api/WatchSource.scala +++ b/client/src/main/scala/skuber/api/watch/WatchSource.scala @@ -1,4 +1,4 @@ -package skuber.api +package skuber.api.watch import akka.NotUsed import akka.actor.ActorSystem @@ -7,7 +7,8 @@ import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Merge, Source} import akka.stream.{Materializer, SourceShape} import play.api.libs.json.Format import 
skuber.api.client._ -import skuber.{K8SRequestContext, ObjectResource, ResourceDefinition} +import skuber.api.client.impl.KubernetesClientImpl +import skuber.{K8SRequestContext, ObjectResource, ResourceDefinition, ListOptions} import scala.concurrent.ExecutionContext import scala.concurrent.duration._ @@ -26,14 +27,10 @@ private[api] object WatchSource { case class StreamContext(currentResourceVersion: Option[String], state: StreamState) - private val timeoutSecondsQueryParam: String = "timeoutSeconds" - private val resourceVersionQueryParam: String = "resourceVersion" - - def apply[O <: ObjectResource](rc: K8SRequestContext, + def apply[O <: ObjectResource](client: KubernetesClientImpl, pool: Pool[Start[O]], name: Option[String], - sinceResourceVersion: Option[String], - maxWatchRequestTimeout: Duration, + options: ListOptions, bufSize: Int)(implicit sys: ActorSystem, fm: Materializer, format: Format[O], @@ -44,11 +41,16 @@ private[api] object WatchSource { implicit val dispatcher: ExecutionContext = sys.dispatcher - def createRequest(rc: RequestContext, name: Option[String], sinceResourceVersion: Option[String], rd: ResourceDefinition[O], timeout: Duration) = { - rc.buildRequest( - HttpMethods.GET, rd, name, - query = Some(Uri.Query(timeoutSecondsQueryParam -> timeout.toSeconds.toString :: sinceResourceVersion.map(v => resourceVersionQueryParam -> v).toList: _*)), - watch = true + def createWatchRequest(since: Option[String]) = + { + val nameFieldSelector=name.map(objName => s"metadata.name=$objName") + val watchOptions=options.copy( + resourceVersion = since, + watch = Some(true), + fieldSelector = nameFieldSelector.orElse(options.fieldSelector) + ) + client.buildRequest( + HttpMethods.GET, rd, None, query = Some(Uri.Query(watchOptions.asMap)) ) } @@ -57,28 +59,28 @@ private[api] object WatchSource { def singleStart(s:StreamElement[O]) = Source.single(s) val initSource = Source.single( - (createRequest(rc, name, sinceResourceVersion, rd, maxWatchRequestTimeout), 
Start[O](sinceResourceVersion)) + (createWatchRequest(options.resourceVersion), Start[O](options.resourceVersion)) ) val httpFlow: Flow[(HttpRequest, Start[O]), StreamElement[O], NotUsed] = Flow[(HttpRequest, Start[O])].map { request => // log request - rc.logInfo(rc.logConfig.logRequestBasic, s"about to send HTTP request: ${request._1.method.value} ${request._1.uri.toString}") + client.logInfo(client.logConfig.logRequestBasic, s"about to send HTTP request: ${request._1.method.value} ${request._1.uri.toString}") request }.via(pool).flatMapConcat { case (Success(HttpResponse(StatusCodes.OK, _, entity, _)), se) => - rc.logInfo(rc.logConfig.logResponseBasic, s"received response with HTTP status 200") + client.logInfo(client.logConfig.logResponseBasic, s"received response with HTTP status 200") singleStart(se).concat( BytesToWatchEventSource[O](entity.dataBytes, bufSize).map { event => Result[O](event._object.resourceVersion, event) } ).concat(singleEnd) case (Success(HttpResponse(sc, _, entity, _)), _) => - rc.logWarn(s"Error watching resource. Recieved a status of ${sc.intValue()}") + client.logWarn(s"Error watching resource. 
Received a status of ${sc.intValue()}") entity.discardBytes() - throw new K8SException(Status(message = Some("Error watching resource."), code = Some(sc.intValue()))) + throw new K8SException(Status(message = Some("Error watching resource"), code = Some(sc.intValue()))) case (Failure(f), _) => - rc.logError("Error watching resource.", f) - throw new K8SException(Status(message = Some("Error watching resource."))) + client.logError("Error watching resource.", f) + throw new K8SException(Status(message = Some("Error watching resource"), details = Some(f.getMessage))) } val outboundFlow: Flow[StreamElement[O], WatchEvent[O], NotUsed] = @@ -89,6 +91,7 @@ private[api] object WatchSource { case _ => throw new K8SException(Status(message = Some("Error processing watch events."))) } + val feedbackFlow: Flow[StreamElement[O], (HttpRequest, Start[O]), NotUsed] = Flow[StreamElement[O]].scan(StreamContext(None, Waiting)){(cxt, next) => next match { @@ -97,7 +100,7 @@ private[api] object WatchSource { case End() => cxt.copy(state = Finished) } }.filter(_.state == Finished).map { acc => - (createRequest(rc, name, acc.currentResourceVersion, rd, maxWatchRequestTimeout), Start[O](acc.currentResourceVersion)) + (createWatchRequest(acc.currentResourceVersion), Start[O](acc.currentResourceVersion)) } val init = b.add(initSource) diff --git a/client/src/main/scala/skuber/json/package.scala b/client/src/main/scala/skuber/json/package.scala index ea1e0a2a..a3eaedbb 100644 --- a/client/src/main/scala/skuber/json/package.scala +++ b/client/src/main/scala/skuber/json/package.scala @@ -204,7 +204,8 @@ package object format { implicit val listMetaFormat: Format[ListMeta] = ( (JsPath \ "selfLink").formatMaybeEmptyString() and - (JsPath \ "resourceVersion").formatMaybeEmptyString() + (JsPath \ "resourceVersion").formatMaybeEmptyString() and + (JsPath \ "continue").formatNullable[String] )(ListMeta.apply _, unlift(ListMeta.unapply)) implicit val localObjRefFormat = 
Json.format[LocalObjectReference] diff --git a/client/src/main/scala/skuber/package.scala b/client/src/main/scala/skuber/package.scala index e267ed8d..c3dd9975 100644 --- a/client/src/main/scala/skuber/package.scala +++ b/client/src/main/scala/skuber/package.scala @@ -2,12 +2,6 @@ import scala.language.implicitConversions import java.net.URL -import akka.http.scaladsl.model.{HttpCharsets, MediaType} -import akka.stream.Materializer -import com.typesafe.config.Config -import play.api.libs.json._ -import skuber.api.client.{RequestContext, Status} - /* * Represents core types and aliases * In future some of these are likely to be moved to separate files and even packages @@ -69,7 +63,8 @@ package object skuber { case class ListMeta( selfLink: String = "", - resourceVersion: String = "") + resourceVersion: String = "", + continue: Option[String] = None) case class APIVersions( kind: String, @@ -229,7 +224,6 @@ package object skuber { type DeletePropagation = Value val Orphan, Background, Foreground = Value } - case class Preconditions(uid: String="") case class DeleteOptions( apiVersion: String = "v1", @@ -238,6 +232,32 @@ package object skuber { preconditions: Option[Preconditions] = None, propagationPolicy: Option[DeletePropagation.Value] = None) + // List options can be passed to a list or watch request. 
+ case class ListOptions( + labelSelector: Option[LabelSelector] = None, + fieldSelector: Option[String] = None, + includeUninitialized: Option[Boolean] = None, + resourceVersion: Option[String] = None, + timeoutSeconds: Option[Long] = None, + limit: Option[Long] = None, + continue: Option[String] = None, + watch: Option[Boolean] = None // NOTE: not for application use - it will be overridden by watch requests + ) { + lazy val asOptionalsMap: Map[String, Option[String]] = Map( + "labelSelector" -> labelSelector.map(_.toString), + "fieldSelector" -> fieldSelector, + "includeUninitialized" -> includeUninitialized.map(_.toString), + "resourceVersion" -> resourceVersion, + "timeoutSeconds" -> timeoutSeconds.map(_.toString), + "limit" -> limit.map(_.toString), + "continue" -> continue, + "watch" -> watch.map(_.toString)) + + lazy val asMap: Map[String, String] = asOptionalsMap.collect { + case (key, Some(value)) => key -> value + } + } + // Any object resource type [O <: ObjectResource] that supports a `status` subresource must provide an // implicit value of HasStatusSubresource type to enable the client `status` API methods to be used on // such resources @@ -245,33 +265,40 @@ package object skuber { // aliases, references and delegates that enable using the API for many use cases without // having to import anything from the skuber.api package + + import skuber.api.client.KubernetesClient + val K8SCluster = skuber.api.client.Cluster val K8SContext = skuber.api.client.Context - type K8SRequestContext = skuber.api.client.RequestContext + type K8SRequestContext = KubernetesClient type K8SException = skuber.api.client.K8SException val K8SConfiguration = skuber.api.Configuration type K8SWatchEvent[I <: ObjectResource] = skuber.api.client.WatchEvent[I] + // Initialisation of the Skuber Kubernetes client + import akka.actor.ActorSystem + import akka.stream.Materializer + import com.typesafe.config.Config /** * Initialise Skuber using default Kubernetes and application 
configuration. */ - def k8sInit(implicit actorSystem: ActorSystem, materializer: Materializer): RequestContext = { + def k8sInit(implicit actorSystem: ActorSystem, materializer: Materializer): KubernetesClient = { skuber.api.client.init } /** * Initialise Skuber using the specified Kubernetes configuration and default application configuration. */ - def k8sInit(config: skuber.api.Configuration)(implicit actorSystem: ActorSystem, materializer: Materializer): RequestContext = { + def k8sInit(config: skuber.api.Configuration)(implicit actorSystem: ActorSystem, materializer: Materializer): KubernetesClient = { skuber.api.client.init(config) } /** * Initialise Skuber using default Kubernetes configuration and the specified application configuration. */ - def k8sInit(appConfig: Config)(implicit actorSystem: ActorSystem, materializer: Materializer): RequestContext = { + def k8sInit(appConfig: Config)(implicit actorSystem: ActorSystem, materializer: Materializer): KubernetesClient = { skuber.api.client.init(appConfig) } @@ -279,7 +306,8 @@ package object skuber { * Initialise Skuber using the specified Kubernetes and application configuration. 
*/ def k8sInit(config: skuber.api.Configuration, appConfig: Config)(implicit actorSystem: ActorSystem, materializer: Materializer) - : RequestContext = { + : KubernetesClient = + { skuber.api.client.init(config, appConfig) } } diff --git a/client/src/test/scala/skuber/api/BytesToWatchEventSourceSpec.scala b/client/src/test/scala/skuber/api/BytesToWatchEventSourceSpec.scala index 5825f9ea..930d474a 100644 --- a/client/src/test/scala/skuber/api/BytesToWatchEventSourceSpec.scala +++ b/client/src/test/scala/skuber/api/BytesToWatchEventSourceSpec.scala @@ -8,6 +8,7 @@ import akka.util.ByteString import akka.stream.ActorMaterializer import akka.stream.scaladsl.{Sink, Source} import akka.actor.ActorSystem +import skuber.api.watch.BytesToWatchEventSource import scala.concurrent.{Await, ExecutionContext, Future} import scala.concurrent.duration.Duration diff --git a/client/src/test/scala/skuber/api/LongPollingPoolSpec.scala b/client/src/test/scala/skuber/api/LongPollingPoolSpec.scala index 5c3cd211..02f5b240 100644 --- a/client/src/test/scala/skuber/api/LongPollingPoolSpec.scala +++ b/client/src/test/scala/skuber/api/LongPollingPoolSpec.scala @@ -17,6 +17,7 @@ import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory} import org.scalatest.concurrent.ScalaFutures import org.scalatest.time.{Millis, Seconds, Span} import org.specs2.mutable.Specification +import skuber.api.watch.LongPollingPool import scala.concurrent.ExecutionContext import scala.concurrent.duration._ diff --git a/client/src/test/scala/skuber/api/WatchSourceSpec.scala b/client/src/test/scala/skuber/api/WatchSourceSpec.scala index 3338b3dd..e88a0cc6 100644 --- a/client/src/test/scala/skuber/api/WatchSourceSpec.scala +++ b/client/src/test/scala/skuber/api/WatchSourceSpec.scala @@ -13,9 +13,11 @@ import com.fasterxml.jackson.core.JsonParseException import org.mockito.Mockito.{times, verify, when} import org.scalatest.mockito.MockitoSugar import org.specs2.mutable.Specification -import 
skuber.api.WatchSource.Start +import skuber.api.client.impl.KubernetesClientImpl +import skuber.api.watch.WatchSource.Start import skuber.api.client.{LoggingContext, _} -import skuber.{Container, DNSPolicy, K8SRequestContext, ObjectMeta, ObjectResource, Pod, Protocol, ReplicationController, Resource, RestartPolicy} +import skuber.api.watch.WatchSource +import skuber.{Container, DNSPolicy, ListOptions, ObjectMeta, ObjectResource, Pod, Protocol, ReplicationController, Resource, RestartPolicy} import skuber.json.format._ import scala.concurrent.Await @@ -31,25 +33,25 @@ class WatchSourceSpec extends Specification with MockitoSugar { "WatchSource" should { "read event continuously with no name specified and from a point in time" >> { - val rc = mock[K8SRequestContext] + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) val secondRequest = HttpRequest(uri = Uri("http://watch/2")) - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null) + when(client.logConfig).thenReturn(LoggingConfig()) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null) ).thenReturn(firstRequest) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804")), watch = true, null) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804", "watch" -> "true")), null) ).thenReturn(secondRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.OK, entity = 
createHttpEntity(retrieveWatchJson("/watchReplicationControllerFirstRequest.json"))), secondRequest -> HttpResponse(StatusCodes.OK, entity = createHttpEntity(retrieveWatchJson("/watchReplicationControllerSecondRequest.json"))) ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(repsonses), None, Some("12802"), 1.second, 10000) + WatchSource[ReplicationController](client, mockPool(responses), None, ListOptions(resourceVersion=Some("12802"), timeoutSeconds=Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -64,36 +66,36 @@ class WatchSourceSpec extends Specification with MockitoSugar { downstream.expectComplete() - verify(rc, times(4)).logConfig - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null + verify(client, times(4)).logConfig + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null ) - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804")), watch = true, null + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804", "watch" -> "true")), null ) ok } "read event continuously with no name specified from the beginning" >> { - val rc = mock[K8SRequestContext] + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) val secondRequest = HttpRequest(uri = Uri("http://watch/2")) - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1")), watch = true, null) + 
when(client.logConfig).thenReturn(LoggingConfig()) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "watch" -> "true")), null) ).thenReturn(firstRequest) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804")), watch = true, null) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804", "watch" -> "true")), null) ).thenReturn(secondRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.OK, entity = createHttpEntity(retrieveWatchJson("/watchReplicationControllerFirstRequest.json"))), secondRequest -> HttpResponse(StatusCodes.OK, entity = createHttpEntity(retrieveWatchJson("/watchReplicationControllerSecondRequest.json"))) ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(repsonses), None, None, 1.second, 10000) + WatchSource[ReplicationController](client, mockPool(responses), None, ListOptions(timeoutSeconds=Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -108,36 +110,41 @@ class WatchSourceSpec extends Specification with MockitoSugar { downstream.expectComplete() - verify(rc, times(4)).logConfig - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1")), watch = true, null + verify(client, times(4)).logConfig + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "watch" -> "true")), null ) - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804")), watch = true, null + verify(client).buildRequest( + HttpMethods.GET, 
skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804", "watch" -> "true")), null ) ok } "read event continuously with name specified from a point in time" >> { - val rc = mock[K8SRequestContext] + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) val secondRequest = HttpRequest(uri = Uri("http://watch/2")) + val name = "someName" + val nameFieldSelector=s"metadata.name=$name" + val query1=Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true", "fieldSelector" -> nameFieldSelector) + val query2=Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804", "watch" -> "true", "fieldSelector" -> nameFieldSelector) - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, Some("someName"), Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null) + when(client.logConfig).thenReturn(LoggingConfig()) + + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(query1), null) ).thenReturn(firstRequest) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, Some("someName"), Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804")), watch = true, null) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(query2), null) ).thenReturn(secondRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.OK, entity = createHttpEntity(retrieveWatchJson("/watchReplicationControllerFirstRequest.json"))), secondRequest -> HttpResponse(StatusCodes.OK, entity = createHttpEntity(retrieveWatchJson("/watchReplicationControllerSecondRequest.json"))) ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(repsonses), Some("someName"), Some("12802"), 1.second, 10000) + 
WatchSource[ReplicationController](client, mockPool(responses), Some(name), ListOptions(resourceVersion= Some("12802"),timeoutSeconds = Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -152,37 +159,40 @@ class WatchSourceSpec extends Specification with MockitoSugar { downstream.expectComplete() - verify(rc, times(4)).logConfig - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, Some("someName"), Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null + verify(client, times(4)).logConfig + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(query1), null ) - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, Some("someName"), Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804")), watch = true, null + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(query2), null ) ok } "read event continuously with name specified from the beginning" >> { - val rc = mock[K8SRequestContext] + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) val secondRequest = HttpRequest(uri = Uri("http://watch/2")) - - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, Some("someName"), Some(Uri.Query("timeoutSeconds" -> "1")), watch = true, null) + val name="someName" + val nameFieldSelector=s"metadata.name=$name" + val query1=Uri.Query("timeoutSeconds" -> "1", "watch" -> "true", "fieldSelector" -> nameFieldSelector) + val query2=Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804", "watch" -> "true","fieldSelector" -> nameFieldSelector) + when(client.logConfig).thenReturn(LoggingConfig()) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(query1), null) 
).thenReturn(firstRequest) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, Some("someName"), Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804")), watch = true, null) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(query2), null) ).thenReturn(secondRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.OK, entity = createHttpEntity(retrieveWatchJson("/watchReplicationControllerFirstRequest.json"))), secondRequest -> HttpResponse(StatusCodes.OK, entity = createHttpEntity(retrieveWatchJson("/watchReplicationControllerSecondRequest.json"))) ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(repsonses), Some("someName"), None, 1.second, 10000) + WatchSource[ReplicationController](client, mockPool(responses), Some(name), ListOptions(timeoutSeconds = Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -197,39 +207,39 @@ class WatchSourceSpec extends Specification with MockitoSugar { downstream.expectComplete() - verify(rc, times(4)).logConfig - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, Some("someName"), Some(Uri.Query("timeoutSeconds" -> "1")), watch = true, null + verify(client, times(4)).logConfig + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(query1), null ) - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, Some("someName"), Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804")), watch = true, null + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(query2), null ) ok } "handle empty responses from the cluster when request timeout times out" >> { - val rc = mock[K8SRequestContext] + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) 
val secondRequest = HttpRequest(uri = Uri("http://watch/2")) val thirdRequest = HttpRequest(uri = Uri("http://watch/3")) - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null) + when(client.logConfig).thenReturn(LoggingConfig()) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null) ).thenReturn(firstRequest) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804")), watch = true, null) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804", "watch" -> "true")), null) ).thenReturn(secondRequest).thenReturn(thirdRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.OK, entity = createHttpEntity(retrieveWatchJson("/watchReplicationControllerFirstRequest.json"))), secondRequest -> HttpResponse(StatusCodes.OK, entity = ""), thirdRequest -> HttpResponse(StatusCodes.OK, entity = createHttpEntity(retrieveWatchJson("/watchReplicationControllerSecondRequest.json"))) ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(repsonses), None, Some("12802"), 1.second, 10000) + WatchSource[ReplicationController](client, mockPool(responses), None, ListOptions(resourceVersion=Some("12802"), timeoutSeconds=Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -244,31 +254,31 @@ class WatchSourceSpec extends Specification with MockitoSugar { downstream.expectComplete() - verify(rc, times(6)).logConfig - verify(rc).buildRequest( - HttpMethods.GET, 
skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null + verify(client, times(6)).logConfig + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null ) - verify(rc, times(2)).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804")), watch = true, null + verify(client, times(2)).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12804", "watch" -> "true")), null ) ok } "handle bad input from cluster" >> { - val rc = mock[K8SRequestContext] + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null) + when(client.logConfig).thenReturn(LoggingConfig()) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null) ).thenReturn(firstRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.OK, entity = "bad input") ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(repsonses), None, Some("12802"), 1.second, 10000) + WatchSource[ReplicationController](client, mockPool(responses), None, ListOptions(resourceVersion=Some("12802"), timeoutSeconds=Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -280,29 +290,29 @@ class WatchSourceSpec extends Specification with MockitoSugar { error must 
haveClass[FramingException] error.getMessage mustEqual "Invalid JSON encountered at position [0] of [ByteString(98, 97, 100, 32, 105, 110, 112, 117, 116)]" - verify(rc, times(2)).logConfig - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null + verify(client, times(2)).logConfig + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null ) ok } "handle bad json from cluster" >> { - val rc = mock[K8SRequestContext] + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null) + when(client.logConfig).thenReturn(LoggingConfig()) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null) ).thenReturn(firstRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.OK, entity = createHttpEntity("{asdf:asdfa}")) ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(repsonses), None, Some("12802"), 1.second, 10000) + WatchSource[ReplicationController](client, mockPool(responses), None, ListOptions(resourceVersion=Some("12802"), timeoutSeconds=Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -314,29 +324,29 @@ class WatchSourceSpec extends Specification with MockitoSugar { error must haveClass[JsonParseException] error.getMessage mustEqual "Unexpected character ('a' (code 97)): was expecting double-quote to start field name\n 
at [Source: {asdf:asdfa}; line: 1, column: 3]" - verify(rc, times(2)).logConfig - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null + verify(client, times(2)).logConfig + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null ) ok } "handle a HTTP 500 error from service" >> { - val rc = mock[K8SRequestContext] + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null) + when(client.logConfig).thenReturn(LoggingConfig()) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null) ).thenReturn(firstRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.InternalServerError) ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(repsonses), None, Some("12802"), 1.second, 10000) + WatchSource[ReplicationController](client, mockPool(responses), None, ListOptions(resourceVersion=Some("12802"), timeoutSeconds=Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -346,31 +356,31 @@ class WatchSourceSpec extends Specification with MockitoSugar { .expectError() error must haveClass[K8SException] - error.asInstanceOf[K8SException].status mustEqual Status(message = Some("Error watching resource."), code = Some(500)) + error.asInstanceOf[K8SException].status.code mustEqual Some(500) - verify(rc).logConfig - 
verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null + verify(client).logConfig + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null ) ok } - "handle a HTTP 403 error from service" >> { - val rc = mock[K8SRequestContext] + "handle a HTTP 401 error from service" >> { + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null) + when(client.logConfig).thenReturn(LoggingConfig()) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null) ).thenReturn(firstRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.Unauthorized) ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(repsonses), None, Some("12802"), 1.second, 10000) + WatchSource[ReplicationController](client, mockPool(responses), None, ListOptions(resourceVersion=Some("12802"), timeoutSeconds=Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -380,32 +390,32 @@ class WatchSourceSpec extends Specification with MockitoSugar { .expectError() error must haveClass[K8SException] - error.asInstanceOf[K8SException].status mustEqual Status(message = Some("Error watching resource."), code = Some(401)) + error.asInstanceOf[K8SException].status.code mustEqual Some(401) - verify(rc).logConfig + verify(client).logConfig - verify(rc).buildRequest( - HttpMethods.GET, 
skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null ) ok } "handle idle timeout from service" >> { - val rc = mock[K8SRequestContext] + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null) + when(client.logConfig).thenReturn(LoggingConfig()) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null) ).thenReturn(firstRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.InternalServerError) ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(new TcpIdleTimeoutException("timeout", 10.seconds)), None, Some("12802"), 1.second, 10000) + WatchSource[ReplicationController](client, mockPool(new TcpIdleTimeoutException("timeout", 10.seconds)), None, ListOptions(resourceVersion=Some("12802"), timeoutSeconds=Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -415,32 +425,31 @@ class WatchSourceSpec extends Specification with MockitoSugar { .expectError() error must haveClass[K8SException] - error.asInstanceOf[K8SException].status mustEqual Status(message = Some("Error watching resource.")) - verify(rc).logConfig + verify(client).logConfig - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch 
= true, null + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null ) ok } "handle connection timeout from service" >> { - val rc = mock[K8SRequestContext] + val client = mock[KubernetesClientImpl] val firstRequest = HttpRequest(uri = Uri("http://watch/1")) - when(rc.logConfig).thenReturn(LoggingConfig()) - when(rc.buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null) + when(client.logConfig).thenReturn(LoggingConfig()) + when(client.buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null) ).thenReturn(firstRequest) - val repsonses = Map( + val responses = Map( firstRequest -> HttpResponse(StatusCodes.InternalServerError) ) val (switch, downstream) = - WatchSource[ReplicationController](rc, mockPool(new ConnectException(s"Connect timeout of 10s expired")), None, Some("12802"), 1.second, 10000) + WatchSource[ReplicationController](client, mockPool(new ConnectException(s"Connect timeout of 10s expired")), None, ListOptions(resourceVersion=Some("12802"), timeoutSeconds=Some(1)), 10000) .viaMat(KillSwitches.single)(Keep.right) .toMat(TestSink.probe)(Keep.both) .run() @@ -450,12 +459,10 @@ class WatchSourceSpec extends Specification with MockitoSugar { .expectError() error must haveClass[K8SException] - error.asInstanceOf[K8SException].status mustEqual Status(message = Some("Error watching resource.")) - - verify(rc).logConfig - verify(rc).buildRequest( - HttpMethods.GET, skuber.ReplicationController.rcDef, None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802")), watch = true, null + verify(client).logConfig + verify(client).buildRequest( + HttpMethods.GET, skuber.ReplicationController.rcDef, 
None, Some(Uri.Query("timeoutSeconds" -> "1", "resourceVersion" -> "12802", "watch" -> "true")), null ) ok } diff --git a/examples/src/main/scala/skuber/examples/guestbook/KubernetesProxyActor.scala b/examples/src/main/scala/skuber/examples/guestbook/KubernetesProxyActor.scala index 30f76bfb..3e425a64 100644 --- a/examples/src/main/scala/skuber/examples/guestbook/KubernetesProxyActor.scala +++ b/examples/src/main/scala/skuber/examples/guestbook/KubernetesProxyActor.scala @@ -56,7 +56,7 @@ class KubernetesProxyActor extends Actor with ActorLogging { implicit val materializer = ActorMaterializer() implicit val dispatcher = system.dispatcher - val k8s = k8sInit // initialize skuber client (request context) + val k8s = k8sInit // initialize skuber client var rcWatching = mutable.HashMap[String, Watching]() private def invoke(skuberRequest: => Future[Any]) : Future[Any] = { From 4b1b8ad6df1332d2ecce9b5c705152fe2ff0a554 Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Sat, 24 Nov 2018 17:13:13 +0900 Subject: [PATCH 3/5] Refactor patch objects (#243) --- client/src/it/scala/skuber/PatchSpec.scala | 4 ++-- client/src/main/scala/skuber/api/patch.scala | 10 +++++++++- .../src/main/scala/skuber/json/package.scala | 4 ++-- .../skuber/examples/patch/PatchExamples.scala | 18 ++++++++---------- 4 files changed, 21 insertions(+), 15 deletions(-) diff --git a/client/src/it/scala/skuber/PatchSpec.scala b/client/src/it/scala/skuber/PatchSpec.scala index f2d8f1f5..5670e290 100644 --- a/client/src/it/scala/skuber/PatchSpec.scala +++ b/client/src/it/scala/skuber/PatchSpec.scala @@ -88,12 +88,12 @@ class PatchSpec extends K8SFixture with Eventually with Matchers with BeforeAndA it should "patch a pod with json patch" in { k8s => val randomString = java.util.UUID.randomUUID().toString val annotations = Map("skuber" -> "wow") - val patchData = JsonPatchOperationList(List( + val patchData = JsonPatch(List( JsonPatchOperation.Add("/metadata/labels/foo", randomString),
JsonPatchOperation.Add("/metadata/annotations", randomString), JsonPatchOperation.Remove("/metadata/annotations"), )) - k8s.patch[JsonPatchOperationList, Pod](nginxPodName, patchData).map { _ => + k8s.patch[JsonPatch, Pod](nginxPodName, patchData).map { _ => eventually(timeout(10 seconds), interval(1 seconds)) { val retrievePod = k8s.get[Pod](nginxPodName) val podRetrieved = Await.ready(retrievePod, 2 seconds).value.get diff --git a/client/src/main/scala/skuber/api/patch.scala b/client/src/main/scala/skuber/api/patch.scala index eec9a28b..7ab15216 100644 --- a/client/src/main/scala/skuber/api/patch.scala +++ b/client/src/main/scala/skuber/api/patch.scala @@ -66,10 +66,18 @@ package object patch { case object JsonPatchStrategy extends PatchStrategy - case class JsonPatchOperationList(operations: List[JsonPatchOperation.Operation]) extends Patch { + case class JsonPatch(operations: List[JsonPatchOperation.Operation]) extends Patch { override val strategy = JsonPatchStrategy } + trait JsonMergePatch extends Patch { + override val strategy = JsonMergePatchStrategy + } + + trait StrategicMergePatch extends Patch { + override val strategy = StrategicMergePatchStrategy + } + case class MetadataPatch(labels: Option[Map[String, String]] = Some(Map()), annotations: Option[Map[String, String]] = Some(Map()), override val strategy: MergePatchStrategy = StrategicMergePatchStrategy) extends Patch diff --git a/client/src/main/scala/skuber/json/package.scala b/client/src/main/scala/skuber/json/package.scala index a3eaedbb..0479a8c2 100644 --- a/client/src/main/scala/skuber/json/package.scala +++ b/client/src/main/scala/skuber/json/package.scala @@ -8,7 +8,7 @@ import org.apache.commons.codec.binary.Base64 import play.api.libs.functional.syntax._ import play.api.libs.json._ import skuber._ -import skuber.api.patch.{JsonPatchOperation, JsonPatchOperationList, MetadataPatch} +import skuber.api.patch.{JsonPatchOperation, JsonPatch, MetadataPatch} /** * @author David O'Riordan @@ 
-1118,7 +1118,7 @@ package object format { })) } - implicit def jsonPatchOperationListWrite = Writes[JsonPatchOperationList] { value => + implicit def jsonPatchWrite = Writes[JsonPatch] { value => JsArray(value.operations.map(jsonPatchOperationWrite.writes)) } implicit val metadataPatchWrite = Writes[MetadataPatch] { value => diff --git a/examples/src/main/scala/skuber/examples/patch/PatchExamples.scala b/examples/src/main/scala/skuber/examples/patch/PatchExamples.scala index c57330b9..f503b437 100644 --- a/examples/src/main/scala/skuber/examples/patch/PatchExamples.scala +++ b/examples/src/main/scala/skuber/examples/patch/PatchExamples.scala @@ -8,8 +8,8 @@ import skuber.apps.v1beta1.StatefulSet import scala.concurrent.Await import scala.concurrent.duration.Duration.Inf - import play.api.libs.json.{Format, Json} +import skuber.api.patch.JsonMergePatch /** * @author David O'Riordan * @@ -19,18 +19,16 @@ import play.api.libs.json.{Format, Json} */ object PatchExamples extends App { - // A model - with implicit JSON formatters - of the specific JSON merge patch we are going to send to the server in this example. - // The resulting JSON will look like `{ "spec" : { "replicas" : } }`, and will be converted to a String for sending via the - // API jsonMergePatch method. + // API patch method. // This will change the replica count on the statefulset causing it to scale accordingly. 
- // Clients do not need to make a model with JSON formatters to send a patch, as the API method takes the patch JSON as a String type, but - // this pattern ensures valid JSON is sent case class ReplicaSpec(replicas: Int) - case class ReplicaPatch(spec: ReplicaSpec) + case class ReplicaPatch(spec: ReplicaSpec) extends JsonMergePatch implicit val rsFmt: Format[ReplicaSpec] = Json.format[ReplicaSpec] implicit val rpFmt: Format[ReplicaPatch] = Json.format[ReplicaPatch] + val statefulSetName = "nginx-patch-sts" + def scaleNginx = { val nginxContainer = Container("nginx",image="nginx").exposePort(80) @@ -40,9 +38,9 @@ object PatchExamples extends App { val nginxStsLabels=Map("patch-example" -> "statefulset") val nginxStsSel=LabelSelector(LabelSelector.IsEqualRequirement("patch-example","statefulset")) val nginxStsSpec=nginxBaseSpec.addLabels(nginxStsLabels) - val nginxStatefulSet= StatefulSet("nginx-patch-sts") + val nginxStatefulSet= StatefulSet(statefulSetName) .withReplicas(4) - .withServiceName("nginx-patch-sts") + .withServiceName(statefulSetName) .withLabelSelector(nginxStsSel) .withTemplate(nginxStsSpec) @@ -82,7 +80,7 @@ object PatchExamples extends App { val singleReplicaPatchJsonStr=singleReplicaPatchJson.toString // Send the Patch to the statefulset on Kubernetes - val patchedStsFut = k8s.jsonMergePatch(sts, singleReplicaPatchJsonStr) + val patchedStsFut = k8s.patch[ReplicaPatch, StatefulSet](statefulSetName, singleReplicaPatch) val patchedSts = Await.result(patchedStsFut, Inf) println(s"Patched statefulset now has a desired replica count of ${patchedSts.spec.get.replicas}") From 6f2f17d92c6eac376579b311b055f9a7d21caad0 Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Sat, 24 Nov 2018 18:13:05 +0900 Subject: [PATCH 4/5] Update security context (#241) --- client/src/main/scala/skuber/Container.scala | 2 +- client/src/main/scala/skuber/Pod.scala | 2 +- client/src/main/scala/skuber/Security.scala | 66 ++++++++++++------- 
.../src/main/scala/skuber/json/package.scala | 27 ++++++-- .../resources/examplePodExtendedSpec.json | 1 - .../scala/skuber/json/PodFormatSpec.scala | 6 +- 6 files changed, 67 insertions(+), 37 deletions(-) diff --git a/client/src/main/scala/skuber/Container.scala b/client/src/main/scala/skuber/Container.scala index 253a4a57..59c90b6e 100644 --- a/client/src/main/scala/skuber/Container.scala +++ b/client/src/main/scala/skuber/Container.scala @@ -23,7 +23,7 @@ case class Container( terminationMessagePath: Option[String] = None, terminationMessagePolicy: Option[Container.TerminationMessagePolicy.Value] = None, imagePullPolicy: Container.PullPolicy.Value = Container.PullPolicy.IfNotPresent, - securityContext: Option[Security.Context] = None, + securityContext: Option[SecurityContext] = None, envFrom: List[EnvFromSource] = Nil, stdin: Option[Boolean] = None, stdinOnce: Option[Boolean] = None, diff --git a/client/src/main/scala/skuber/Pod.scala b/client/src/main/scala/skuber/Pod.scala index 5720ffb6..fe17982f 100644 --- a/client/src/main/scala/skuber/Pod.scala +++ b/client/src/main/scala/skuber/Pod.scala @@ -40,7 +40,7 @@ object Pod { imagePullSecrets: List[LocalObjectReference] = List(), affinity: Option[Affinity] = None, tolerations: List[Toleration] = List(), - securityContext: Option[Security.Context] = None, + securityContext: Option[PodSecurityContext] = None, hostname: Option[String] = None, hostAliases: List[HostAlias] = Nil, hostPID: Option[Boolean] = None, diff --git a/client/src/main/scala/skuber/Security.scala b/client/src/main/scala/skuber/Security.scala index a7b85d56..d870ebf0 100644 --- a/client/src/main/scala/skuber/Security.scala +++ b/client/src/main/scala/skuber/Security.scala @@ -1,32 +1,48 @@ package skuber /** - * @author David O'Riordan - */ + * @author David O'Riordan + */ + +import Security._ + +case class SecurityContext( + allowPrivilegeEscalation: Option[Boolean] = None, + capabilities: Option[Capabilities] = None, + privileged: 
Option[Boolean] = None, + readOnlyRootFilesystem: Option[Boolean] = None, + runAsGroup: Option[Int] = None, + runAsNonRoot: Option[Boolean] = None, + runAsUser: Option[Int] = None, + seLinuxOptions: Option[SELinuxOptions] = None + ) + +case class PodSecurityContext( + fsGroup: Option[Int] = None, + runAsGroup: Option[Int] = None, + runAsNonRoot: Option[Boolean] = None, + runAsUser: Option[Int] = None, + seLinuxOptions: Option[SELinuxOptions] = None, + supplementalGroups: List[Int] = Nil, + sysctls: List[Sysctl] = Nil + ) object Security { - // Note: Context can be set at pod and/or container level but certain attributes are level-specific - case class Context( - capabilities: Option[Capabilities] = None, - privileged: Option[Boolean] = None, - seLinuxOptions: Option[SELinuxOptions] = None, - runAsUser: Option[Int] = None, - runAsGroup: Option[Int] = None, - runAsNonRoot: Option[Boolean] = None, - fsGroup: Option[Int] = None, - allowPrivilegeEscalation: Option[Boolean] = None - ) - - type Capability=String + type Capability = String + case class Capabilities( - add: List[Capability] = Nil, - drop: List[Capability] = Nil) - + add: List[Capability] = Nil, + drop: List[Capability] = Nil) + case class SELinuxOptions( - user: String = "", - role: String = "", - _type: String = "", - level: String = "") - - -} \ No newline at end of file + user: String = "", + role: String = "", + _type: String = "", + level: String = "") + + case class Sysctl( + name: String, + value: String + ) + +} \ No newline at end of file diff --git a/client/src/main/scala/skuber/json/package.scala b/client/src/main/scala/skuber/json/package.scala index 0479a8c2..a468e8f0 100644 --- a/client/src/main/scala/skuber/json/package.scala +++ b/client/src/main/scala/skuber/json/package.scala @@ -7,6 +7,7 @@ import java.time.format._ import org.apache.commons.codec.binary.Base64 import play.api.libs.functional.syntax._ import play.api.libs.json._ +import skuber.Volume.{ConfigMapVolumeSource, KeyToPath} 
import skuber._ import skuber.api.patch.{JsonPatchOperation, JsonPatch, MetadataPatch} @@ -242,8 +243,20 @@ package object format { (JsPath \ "add").formatMaybeEmptyList[Security.Capability] and (JsPath \ "drop").formatMaybeEmptyList[Security.Capability] )(Security.Capabilities.apply _, unlift(Security.Capabilities.unapply)) - - implicit val secCtxtFormat: Format[Security.Context] = Json.format[Security.Context] + + implicit val secSysctlFormat: Format[Security.Sysctl] = Json.format[Security.Sysctl] + + implicit val secCtxtFormat: Format[SecurityContext] = Json.format[SecurityContext] + + implicit val podSecCtxtFormat: Format[PodSecurityContext] = ( + (JsPath \ "fsGroup").formatNullable[Int] and + (JsPath \ "runAsGroup").formatNullable[Int] and + (JsPath \ "runAsNonRoot").formatNullable[Boolean] and + (JsPath \ "runAsUser").formatNullable[Int] and + (JsPath \ "seLinuxOptions").formatNullable[Security.SELinuxOptions] and + (JsPath \ "supplementalGroups").formatMaybeEmptyList[Int] and + (JsPath \ "sysctls").formatMaybeEmptyList[Security.Sysctl] + )(PodSecurityContext.apply _, unlift(PodSecurityContext.unapply)) implicit val tolerationEffectFmt: Format[Pod.TolerationEffect] = new Format[Pod.TolerationEffect] { @@ -659,7 +672,7 @@ package object format { (JsPath \ "terminationMessagePath").formatNullable[String] and (JsPath \ "terminationMessagePolicy").formatNullableEnum(Container.TerminationMessagePolicy) and (JsPath \ "imagePullPolicy").formatEnum(Container.PullPolicy, Some(Container.PullPolicy.IfNotPresent)) and - (JsPath \ "securityContext").formatNullable[Security.Context] and + (JsPath \ "securityContext").formatNullable[SecurityContext] and (JsPath \ "envFrom").formatMaybeEmptyList[EnvFromSource] and (JsPath \ "stdin").formatNullable[Boolean] and (JsPath \ "stdinOnce").formatNullable[Boolean] and @@ -760,7 +773,7 @@ package object format { // which has finally necessitated a hack to get around Play Json limitations supporting case classes with > 22 members // 
(see e.g. https://stackoverflow.com/questions/28167971/scala-case-having-22-fields-but-having-issue-with-play-json-in-scala-2-11-5) - val podSpecPartOneFormat: OFormat[(List[Container], List[Container], List[Volume], skuber.RestartPolicy.Value, Option[Int], Option[Int], skuber.DNSPolicy.Value, Map[String, String], String, String, Boolean, List[LocalObjectReference], Option[Pod.Affinity], List[Pod.Toleration], Option[Security.Context])] = ( + val podSpecPartOneFormat: OFormat[(List[Container], List[Container], List[Volume], skuber.RestartPolicy.Value, Option[Int], Option[Int], skuber.DNSPolicy.Value, Map[String, String], String, String, Boolean, List[LocalObjectReference], Option[Pod.Affinity], List[Pod.Toleration], Option[PodSecurityContext])] = ( (JsPath \ "containers").format[List[Container]] and (JsPath \ "initContainers").formatMaybeEmptyList[Container] and (JsPath \ "volumes").formatMaybeEmptyList[Volume] and @@ -775,7 +788,7 @@ package object format { (JsPath \ "imagePullSecrets").formatMaybeEmptyList[LocalObjectReference] and (JsPath \ "affinity").formatNullable[Pod.Affinity] and (JsPath \ "tolerations").formatMaybeEmptyList[Pod.Toleration] and - (JsPath \ "securityContext").formatNullable[Security.Context] + (JsPath \ "securityContext").formatNullable[PodSecurityContext] ).tupled val podSpecPartTwoFormat: OFormat[(Option[String], List[Pod.HostAlias], Option[Boolean], Option[Boolean], Option[Boolean], Option[Int], Option[String], Option[String], Option[String], Option[Pod.DNSConfig])] = ( @@ -794,8 +807,8 @@ package object format { implicit val podSpecFmt: Format[Pod.Spec] = ( podSpecPartOneFormat and podSpecPartTwoFormat ).apply({ - case ((conts, initConts, vols, rpol, tgps, adls, dnspol, nodesel, svcac, node, hnet, ips, aff, tol, sc), (host, aliases, pid, ipc, asat, prio, prioc, sched, subd, dnsc)) => - Pod.Spec(conts, initConts, vols, rpol, tgps, adls, dnspol, nodesel, svcac, node, hnet, ips, aff, tol, sc, host, aliases, pid, ipc, asat, prio, prioc, 
sched, subd, dnsc) + case ((conts, initConts, vols, rpol, tgps, adls, dnspol, nodesel, svcac, node, hnet, ips, aff, tol, psc), (host, aliases, pid, ipc, asat, prio, prioc, sched, subd, dnsc)) => + Pod.Spec(conts, initConts, vols, rpol, tgps, adls, dnspol, nodesel, svcac, node, hnet, ips, aff, tol, psc, host, aliases, pid, ipc, asat, prio, prioc, sched, subd, dnsc) }, s =>( ( s.containers, s.initContainers, diff --git a/client/src/test/resources/examplePodExtendedSpec.json b/client/src/test/resources/examplePodExtendedSpec.json index 777fb601..583a77d4 100644 --- a/client/src/test/resources/examplePodExtendedSpec.json +++ b/client/src/test/resources/examplePodExtendedSpec.json @@ -118,7 +118,6 @@ }, "securityContext": { "runAsUser": 1000, - "fsGroup": 2000, "allowPrivilegeEscalation": true } } diff --git a/client/src/test/scala/skuber/json/PodFormatSpec.scala b/client/src/test/scala/skuber/json/PodFormatSpec.scala index c65c462f..5b651729 100644 --- a/client/src/test/scala/skuber/json/PodFormatSpec.scala +++ b/client/src/test/scala/skuber/json/PodFormatSpec.scala @@ -65,7 +65,7 @@ import Pod._ lifecycle=Some(Lifecycle(preStop=Some(ExecAction(List("/bin/bash", "probe"))))), terminationMessagePath=Some("/var/log/termination-message"), terminationMessagePolicy=Some(Container.TerminationMessagePolicy.File), - securityContext=Some(Security.Context(capabilities=Some(Security.Capabilities(add=List("CAP_KILL"),drop=List("CAP_AUDIT_WRITE"))))) + securityContext=Some(SecurityContext(capabilities=Some(Security.Capabilities(add=List("CAP_KILL"),drop=List("CAP_AUDIT_WRITE"))))) ) ) val vols = List(Volume("myVol1", Volume.Glusterfs("myEndpointsName", "/usr/mypath")), @@ -74,7 +74,8 @@ import Pod._ volumes=vols, dnsPolicy=DNSPolicy.ClusterFirst, nodeSelector=Map("diskType" -> "ssd", "machineSize" -> "large"), - imagePullSecrets=List(LocalObjectReference("abc"),LocalObjectReference("def")) + imagePullSecrets=List(LocalObjectReference("abc"),LocalObjectReference("def")), + 
securityContext=Some(PodSecurityContext(supplementalGroups=List(1, 2, 3))) ) val myPod = Namespace("myNamespace").pod("myPod",pdSpec) @@ -565,6 +566,7 @@ import Pod._ val pod = Json.parse(podJsonStr).as[Pod] val container=pod.spec.get.containers(0) + container.securityContext.get.runAsUser mustEqual Some(1000) container.terminationMessagePolicy mustEqual Some(Container.TerminationMessagePolicy.FallbackToLogsOnError) container.terminationMessagePath mustEqual Some("/tmp/my-log") val mount=container.volumeMounts(0) From b653b116fd2c0203a93a3870d99af19e99e9e8ab Mon Sep 17 00:00:00 2001 From: Shingo Omura Date: Thu, 10 Jan 2019 01:14:57 +0900 Subject: [PATCH 5/5] support 'matchFields' in NodeSelectorTerm which has been supported since k8s v1.11 (#246) also renamed 'Pod.Affinity.MatchExpression[s]' with 'NodeSelectorRequirement[s]' according to k8s api. ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#nodeselectorterm-v1-core --- client/src/main/scala/skuber/Pod.scala | 14 ++-- .../src/main/scala/skuber/json/package.scala | 9 +-- .../resources/exampleNodeSelectorTerm.json | 21 ++++++ .../exampleNodeSelectorTermEmpty.json | 1 + ...pleNodeSelectorTermNoMatchExpressions.json | 11 +++ .../exampleNodeSelectorTermNoMatchFields.json | 12 ++++ .../scala/skuber/json/PodFormatSpec.scala | 68 +++++++++++++++++++ 7 files changed, 125 insertions(+), 11 deletions(-) create mode 100644 client/src/test/resources/exampleNodeSelectorTerm.json create mode 100644 client/src/test/resources/exampleNodeSelectorTermEmpty.json create mode 100644 client/src/test/resources/exampleNodeSelectorTermNoMatchExpressions.json create mode 100644 client/src/test/resources/exampleNodeSelectorTermNoMatchFields.json diff --git a/client/src/main/scala/skuber/Pod.scala b/client/src/main/scala/skuber/Pod.scala index fe17982f..722ba656 100644 --- a/client/src/main/scala/skuber/Pod.scala +++ b/client/src/main/scala/skuber/Pod.scala @@ -90,11 +90,11 @@ object Pod { val In, NotIn, Exists, 
DoesNotExist, Gt, Lt = Value } - case class MatchExpression(key: String, operator: NodeSelectorOperator.Value, values: List[String]) - type MatchExpressions = List[MatchExpression] - def MatchExpressions(xs: MatchExpression*) = List(xs: _*) + case class NodeSelectorRequirement(key: String, operator: NodeSelectorOperator.Value, values: List[String]) + type NodeSelectorRequirements = List[NodeSelectorRequirement] + def NodeSelectorRequirements(xs: NodeSelectorRequirement*) = List(xs: _*) - case class NodeSelectorTerm(matchExpressions: MatchExpressions) + case class NodeSelectorTerm(matchExpressions: NodeSelectorRequirements = List.empty, matchFields: NodeSelectorRequirements = List.empty) type NodeSelectorTerms = List[NodeSelectorTerm] def NodeSelectorTerms(xs: NodeSelectorTerm*) = List(xs: _*) @@ -111,8 +111,8 @@ object Pod { RequiredDuringSchedulingIgnoredDuringExecution( NodeSelectorTerms( NodeSelectorTerm( - MatchExpressions( - MatchExpression(key, operator, values) + matchExpressions = NodeSelectorRequirements( + NodeSelectorRequirement(key, operator, values) ) ) ) @@ -127,7 +127,7 @@ object Pod { def preferredQuery(weight: Int, key: String, operator: NodeSelectorOperator.Value, values: List[String]): PreferredSchedulingTerm = { PreferredSchedulingTerm( preference = NodeSelectorTerm( - MatchExpressions(MatchExpression(key, operator, values)) + matchExpressions = NodeSelectorRequirements(NodeSelectorRequirement(key, operator, values)) ), weight = weight ) diff --git a/client/src/main/scala/skuber/json/package.scala b/client/src/main/scala/skuber/json/package.scala index a468e8f0..1c696b47 100644 --- a/client/src/main/scala/skuber/json/package.scala +++ b/client/src/main/scala/skuber/json/package.scala @@ -717,15 +717,16 @@ package object format { implicit val nodeAffinityOperatorFormat: Format[Pod.Affinity.NodeSelectorOperator.Operator] = Format(enumReads(Pod.Affinity.NodeSelectorOperator), enumWrites) - implicit val nodeMatchExpressionFormat: 
Format[Pod.Affinity.MatchExpression] = ( + implicit val nodeMatchExpressionFormat: Format[Pod.Affinity.NodeSelectorRequirement] = ( (JsPath \ "key").formatMaybeEmptyString() and (JsPath \ "operator").formatEnum(Pod.Affinity.NodeSelectorOperator) and (JsPath \ "values").formatMaybeEmptyList[String] - )(Pod.Affinity.MatchExpression.apply _, unlift(Pod.Affinity.MatchExpression.unapply)) + )(Pod.Affinity.NodeSelectorRequirement.apply _, unlift(Pod.Affinity.NodeSelectorRequirement.unapply)) implicit val nodeSelectorTermFormat: Format[Pod.Affinity.NodeSelectorTerm] = ( - (JsPath \ "matchExpressions").format[Pod.Affinity.MatchExpressions].inmap(matchExpressions => Pod.Affinity.NodeSelectorTerm(matchExpressions), (nst: Pod.Affinity.NodeSelectorTerm) => nst.matchExpressions) - ) + (JsPath \ "matchExpressions").formatMaybeEmptyList[Pod.Affinity.NodeSelectorRequirement] and + (JsPath \ "matchFields").formatMaybeEmptyList[Pod.Affinity.NodeSelectorRequirement] + )(Pod.Affinity.NodeSelectorTerm.apply _, unlift(Pod.Affinity.NodeSelectorTerm.unapply)) implicit val nodeRequiredDuringSchedulingIgnoredDuringExecutionFormat: Format[Pod.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution] = ( (JsPath \ "nodeSelectorTerms").format[Pod.Affinity.NodeSelectorTerms].inmap( diff --git a/client/src/test/resources/exampleNodeSelectorTerm.json b/client/src/test/resources/exampleNodeSelectorTerm.json new file mode 100644 index 00000000..ae5128c3 --- /dev/null +++ b/client/src/test/resources/exampleNodeSelectorTerm.json @@ -0,0 +1,21 @@ +{ + "matchExpressions": [ + { + "key": "kubernetes.io/e2e-az-name", + "operator": "In", + "values": [ + "e2e-az1", + "e2e-az2" + ] + } + ], + "matchFields": [ + { + "key": "metadata.name", + "operator": "In", + "values": [ + "some-node-name" + ] + } + ] +} diff --git a/client/src/test/resources/exampleNodeSelectorTermEmpty.json b/client/src/test/resources/exampleNodeSelectorTermEmpty.json new file mode 100644 index 00000000..0967ef42 --- 
/dev/null +++ b/client/src/test/resources/exampleNodeSelectorTermEmpty.json @@ -0,0 +1 @@ +{} diff --git a/client/src/test/resources/exampleNodeSelectorTermNoMatchExpressions.json b/client/src/test/resources/exampleNodeSelectorTermNoMatchExpressions.json new file mode 100644 index 00000000..d873b643 --- /dev/null +++ b/client/src/test/resources/exampleNodeSelectorTermNoMatchExpressions.json @@ -0,0 +1,11 @@ +{ + "matchFields": [ + { + "key": "metadata.name", + "operator": "In", + "values": [ + "some-node-name" + ] + } + ] +} diff --git a/client/src/test/resources/exampleNodeSelectorTermNoMatchFields.json b/client/src/test/resources/exampleNodeSelectorTermNoMatchFields.json new file mode 100644 index 00000000..24db35d8 --- /dev/null +++ b/client/src/test/resources/exampleNodeSelectorTermNoMatchFields.json @@ -0,0 +1,12 @@ +{ + "matchExpressions": [ + { + "key": "kubernetes.io/e2e-az-name", + "operator": "In", + "values": [ + "e2e-az1", + "e2e-az2" + ] + } + ] +} diff --git a/client/src/test/scala/skuber/json/PodFormatSpec.scala b/client/src/test/scala/skuber/json/PodFormatSpec.scala index 5b651729..3b707970 100644 --- a/client/src/test/scala/skuber/json/PodFormatSpec.scala +++ b/client/src/test/scala/skuber/json/PodFormatSpec.scala @@ -422,6 +422,74 @@ import Pod._ myPod.spec.get.volumes.head.source must beAnInstanceOf[GenericVolumeSource] } + "NodeSelectorTerm be properly read and written as json" >> { + import Affinity.NodeSelectorTerm + import Affinity.NodeSelectorOperator + import Affinity.NodeSelectorRequirement + import Affinity.NodeSelectorRequirements + + val nodeSelectorTermJsonSource = Source.fromURL(getClass.getResource("/exampleNodeSelectorTerm.json")) + val nodeSelectorTermJson = nodeSelectorTermJsonSource.mkString + val myTerm = Json.parse(nodeSelectorTermJson).as[NodeSelectorTerm] + myTerm must_== NodeSelectorTerm( + matchExpressions = NodeSelectorRequirements( + NodeSelectorRequirement("kubernetes.io/e2e-az-name", NodeSelectorOperator.In, 
List("e2e-az1", "e2e-az2")) + ), + matchFields = NodeSelectorRequirements( + NodeSelectorRequirement("metadata.name", NodeSelectorOperator.In, List("some-node-name")) + ) + ) + val readTerm = Json.fromJson[NodeSelectorTerm](Json.toJson(myTerm)).get + myTerm mustEqual readTerm + } + + "NodeSelectorTerm with no matchExpressions be properly read and written as json" >> { + import Affinity.NodeSelectorTerm + import Affinity.NodeSelectorOperator + import Affinity.NodeSelectorRequirement + import Affinity.NodeSelectorRequirements + + val nodeSelectorTermJsonSource = Source.fromURL(getClass.getResource("/exampleNodeSelectorTermNoMatchExpressions.json")) + val nodeSelectorTermJson = nodeSelectorTermJsonSource.mkString + val myTerm = Json.parse(nodeSelectorTermJson).as[NodeSelectorTerm] + myTerm must_== NodeSelectorTerm( + matchFields = NodeSelectorRequirements( + NodeSelectorRequirement("metadata.name", NodeSelectorOperator.In, List("some-node-name")) + ) + ) + val readTerm = Json.fromJson[NodeSelectorTerm](Json.toJson(myTerm)).get + myTerm mustEqual readTerm + } + + "NodeSelectorTerm with no matchFields be properly read and written as json" >> { + import Affinity.NodeSelectorTerm + import Affinity.NodeSelectorOperator + import Affinity.NodeSelectorRequirement + import Affinity.NodeSelectorRequirements + + val nodeSelectorTermJsonSource = Source.fromURL(getClass.getResource("/exampleNodeSelectorTermNoMatchFields.json")) + val nodeSelectorTermJson = nodeSelectorTermJsonSource.mkString + val myTerm = Json.parse(nodeSelectorTermJson).as[NodeSelectorTerm] + myTerm must_== NodeSelectorTerm( + matchExpressions = NodeSelectorRequirements( + NodeSelectorRequirement("kubernetes.io/e2e-az-name", NodeSelectorOperator.In, List("e2e-az1", "e2e-az2")) + ) + ) + val readTerm = Json.fromJson[NodeSelectorTerm](Json.toJson(myTerm)).get + myTerm mustEqual readTerm + } + + "NodeSelectorTerm with empty be properly read and written as json" >> { + import Affinity.NodeSelectorTerm + + val 
nodeSelectorTermJsonSource = Source.fromURL(getClass.getResource("/exampleNodeSelectorTermEmpty.json")) + val nodeSelectorTermJson = nodeSelectorTermJsonSource.mkString + val myTerm = Json.parse(nodeSelectorTermJson).as[NodeSelectorTerm] + myTerm must_== NodeSelectorTerm() + val readTerm = Json.fromJson[NodeSelectorTerm](Json.toJson(myTerm)).get + myTerm mustEqual readTerm + } + "NodeAffinity be properly read and written as json" >> { import Affinity.NodeAffinity import Affinity.NodeSelectorOperator