diff --git a/.gitignore b/.gitignore index f5afad2b1e..ed9b778ad1 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,7 @@ node_modules # Runtime files .garden tmp/ +metadata.json # TS cache on the CI ts-node-* diff --git a/docs/reference/providers/kubernetes.md b/docs/reference/providers/kubernetes.md index 49791fa053..b2b5112449 100644 --- a/docs/reference/providers/kubernetes.md +++ b/docs/reference/providers/kubernetes.md @@ -215,8 +215,9 @@ environments: ### `environments[].providers[].buildMode` [environments](#environments) > [providers](#environments[].providers[]) > buildMode -Choose the mechanism used to build containers before deploying. By default it uses the local docker, but you can set it to 'cluster-docker' to sync files to a remote docker daemon, installed in the cluster, and build container images there. -This is currently experimental and sometimes not needed (e.g. with Docker for Desktop), so it's not enabled by default. +Choose the mechanism used to build containers before deploying. By default it uses the local docker, but you can set it to 'cluster-docker' or 'kaniko' to sync files to a remote docker daemon, installed in the cluster, and build container images there. This avoids the need to run Docker or Kubernetes locally, and allows you to share layer and image caches between multiple developers, as well as between your development and CI workflows. +This is currently experimental and sometimes not desired, so it's not enabled by default. For example when using the `local-kubernetes` provider with Docker for Desktop and Minikube, we directly use the in-cluster docker daemon when building. You might also be deploying to a remote cluster that isn't intended as a development environment, so you'd want your builds to happen elsewhere. +Functionally, both 'cluster-docker' and 'kaniko' do the same thing, but use different underlying mechanisms to build. The former uses a normal Docker daemon in the cluster. Because this has to run in privileged mode, this is less secure than Kaniko, but in turn it is generally faster. See the [Kaniko docs](https://github.com/GoogleContainerTools/kaniko) for more information. | Type | Required | | ---- | -------- | @@ -257,7 +258,7 @@ Require SSL on all services. If set to true, an error is raised when no certific References to `docker-registry` secrets to use for authenticating with remote registries when pulling images. This is necessary if you reference private images in your module configuration, and is required -when configuring a remote Kubernetes environment. +when configuring a remote Kubernetes environment with buildMode=local. | Type | Required | | ---- | -------- | @@ -283,69 +284,13 @@ environments: The namespace where the secret is stored. If necessary, the secret may be copied to the appropriate namespace before use. -| Type | Required | -| ---- | -------- | -| `string` | No -### `environments[].providers[].storage` -[environments](#environments) > [providers](#environments[].providers[]) > storage - -Storage parameters to set for the in-cluster builder and container registry persistent volume (which are automatically installed and used when buildMode=cluster-docker). 
- -| Type | Required | -| ---- | -------- | -| `object` | No -### `environments[].providers[].storage.builder` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > builder - - - -| Type | Required | -| ---- | -------- | -| `object` | No -### `environments[].providers[].storage.builder.size` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [builder](#environments[].providers[].storage.builder) > size - -Volume size for the registry in megabytes. - -| Type | Required | -| ---- | -------- | -| `number` | No -### `environments[].providers[].storage.builder.storageClass` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [builder](#environments[].providers[].storage.builder) > storageClass - -Storage class to use for the volume. - -| Type | Required | -| ---- | -------- | -| `string` | No -### `environments[].providers[].storage.registry` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > registry - - - -| Type | Required | -| ---- | -------- | -| `object` | No -### `environments[].providers[].storage.registry.size` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [registry](#environments[].providers[].storage.registry) > size - -Volume size for the registry in megabytes. - -| Type | Required | -| ---- | -------- | -| `number` | No -### `environments[].providers[].storage.registry.storageClass` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [registry](#environments[].providers[].storage.registry) > storageClass - -Storage class to use for the volume. - | Type | Required | | ---- | -------- | | `string` | No ### `environments[].providers[].resources` [environments](#environments) > [providers](#environments[].providers[]) > resources -Resource requests and limits for the in-cluster builder and container registry (which are automatically installed and used when buildMode=cluster-docker). +Resource requests and limits for the in-cluster builder and container registry (which are automatically installed and used when buildMode is 'cluster-docker' or 'kaniko'). | Type | Required | | ---- | -------- | @@ -462,6 +407,142 @@ Memory request in megabytes. | Type | Required | | ---- | -------- | | `number` | No +### `environments[].providers[].resources.sync` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > sync + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].resources.sync.limits` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > limits + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].resources.sync.limits.cpu` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > [limits](#environments[].providers[].resources.sync.limits) > cpu + +CPU limit in millicpu. 
+ +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].resources.sync.limits.memory` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > [limits](#environments[].providers[].resources.sync.limits) > memory + +Memory limit in megabytes. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].resources.sync.requests` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > requests + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].resources.sync.requests.cpu` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > [requests](#environments[].providers[].resources.sync.requests) > cpu + +CPU request in millicpu. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].resources.sync.requests.memory` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > [requests](#environments[].providers[].resources.sync.requests) > memory + +Memory request in megabytes. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].storage` +[environments](#environments) > [providers](#environments[].providers[]) > storage + +Storage parameters to set for the in-cluster builder, container registry and code sync persistent volumes (which are automatically installed and used when buildMode is 'cluster-docker' or 'kaniko'). + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].storage.builder` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > builder + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].storage.builder.size` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [builder](#environments[].providers[].storage.builder) > size + +Volume size for the registry in megabytes. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].storage.builder.storageClass` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [builder](#environments[].providers[].storage.builder) > storageClass + +Storage class to use for the volume. + +| Type | Required | +| ---- | -------- | +| `string` | No +### `environments[].providers[].storage.registry` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > registry + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].storage.registry.size` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [registry](#environments[].providers[].storage.registry) > size + +Volume size for the registry in megabytes. 
+ +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].storage.registry.storageClass` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [registry](#environments[].providers[].storage.registry) > storageClass + +Storage class to use for the volume. + +| Type | Required | +| ---- | -------- | +| `string` | No +### `environments[].providers[].storage.sync` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > sync + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].storage.sync.size` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [sync](#environments[].providers[].storage.sync) > size + +Volume size for the registry in megabytes. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].storage.sync.storageClass` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [sync](#environments[].providers[].storage.sync) > storageClass + +Storage class to use for the volume. + +| Type | Required | +| ---- | -------- | +| `string` | No ### `environments[].providers[].tlsCertificates[]` [environments](#environments) > [providers](#environments[].providers[]) > tlsCertificates @@ -693,20 +774,13 @@ variables: {} environments: - providers: - environments: - buildMode: local + buildMode: local-docker defaultHostname: defaultUsername: forceSsl: false imagePullSecrets: - name: namespace: default - storage: - builder: - size: 10240 - storageClass: null - registry: - size: 10240 - storageClass: null resources: builder: limits: @@ -722,6 +796,23 @@ environments: requests: cpu: 200 memory: 512 + sync: + limits: + cpu: 200 + memory: 256 + requests: + cpu: 100 + memory: 64 + storage: + builder: + size: 10240 + storageClass: null + registry: + size: 10240 + storageClass: null + sync: + size: 10240 + storageClass: null tlsCertificates: - name: hostnames: diff --git a/docs/reference/providers/local-kubernetes.md b/docs/reference/providers/local-kubernetes.md index 5c1fb97caf..19ca6a46c4 100644 --- a/docs/reference/providers/local-kubernetes.md +++ b/docs/reference/providers/local-kubernetes.md @@ -215,8 +215,9 @@ environments: ### `environments[].providers[].buildMode` [environments](#environments) > [providers](#environments[].providers[]) > buildMode -Choose the mechanism used to build containers before deploying. By default it uses the local docker, but you can set it to 'cluster-docker' to sync files to a remote docker daemon, installed in the cluster, and build container images there. -This is currently experimental and sometimes not needed (e.g. with Docker for Desktop), so it's not enabled by default. +Choose the mechanism used to build containers before deploying. By default it uses the local docker, but you can set it to 'cluster-docker' or 'kaniko' to sync files to a remote docker daemon, installed in the cluster, and build container images there. This avoids the need to run Docker or Kubernetes locally, and allows you to share layer and image caches between multiple developers, as well as between your development and CI workflows. +This is currently experimental and sometimes not desired, so it's not enabled by default. 
For example when using the `local-kubernetes` provider with Docker for Desktop and Minikube, we directly use the in-cluster docker daemon when building. You might also be deploying to a remote cluster that isn't intended as a development environment, so you'd want your builds to happen elsewhere. +Functionally, both 'cluster-docker' and 'kaniko' do the same thing, but use different underlying mechanisms to build. The former uses a normal Docker daemon in the cluster. Because this has to run in privileged mode, this is less secure than Kaniko, but in turn it is generally faster. See the [Kaniko docs](https://github.com/GoogleContainerTools/kaniko) for more information. | Type | Required | | ---- | -------- | @@ -257,7 +258,7 @@ Require SSL on all services. If set to true, an error is raised when no certific References to `docker-registry` secrets to use for authenticating with remote registries when pulling images. This is necessary if you reference private images in your module configuration, and is required -when configuring a remote Kubernetes environment. +when configuring a remote Kubernetes environment with buildMode=local. | Type | Required | | ---- | -------- | @@ -283,69 +284,13 @@ environments: The namespace where the secret is stored. If necessary, the secret may be copied to the appropriate namespace before use. -| Type | Required | -| ---- | -------- | -| `string` | No -### `environments[].providers[].storage` -[environments](#environments) > [providers](#environments[].providers[]) > storage - -Storage parameters to set for the in-cluster builder and container registry persistent volume (which are automatically installed and used when buildMode=cluster-docker). - -| Type | Required | -| ---- | -------- | -| `object` | No -### `environments[].providers[].storage.builder` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > builder - - - -| Type | Required | -| ---- | -------- | -| `object` | No -### `environments[].providers[].storage.builder.size` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [builder](#environments[].providers[].storage.builder) > size - -Volume size for the registry in megabytes. - -| Type | Required | -| ---- | -------- | -| `number` | No -### `environments[].providers[].storage.builder.storageClass` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [builder](#environments[].providers[].storage.builder) > storageClass - -Storage class to use for the volume. - -| Type | Required | -| ---- | -------- | -| `string` | No -### `environments[].providers[].storage.registry` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > registry - - - -| Type | Required | -| ---- | -------- | -| `object` | No -### `environments[].providers[].storage.registry.size` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [registry](#environments[].providers[].storage.registry) > size - -Volume size for the registry in megabytes. 
- -| Type | Required | -| ---- | -------- | -| `number` | No -### `environments[].providers[].storage.registry.storageClass` -[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [registry](#environments[].providers[].storage.registry) > storageClass - -Storage class to use for the volume. - | Type | Required | | ---- | -------- | | `string` | No ### `environments[].providers[].resources` [environments](#environments) > [providers](#environments[].providers[]) > resources -Resource requests and limits for the in-cluster builder and container registry (which are automatically installed and used when buildMode=cluster-docker). +Resource requests and limits for the in-cluster builder and container registry (which are automatically installed and used when buildMode is 'cluster-docker' or 'kaniko'). | Type | Required | | ---- | -------- | @@ -462,6 +407,142 @@ Memory request in megabytes. | Type | Required | | ---- | -------- | | `number` | No +### `environments[].providers[].resources.sync` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > sync + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].resources.sync.limits` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > limits + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].resources.sync.limits.cpu` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > [limits](#environments[].providers[].resources.sync.limits) > cpu + +CPU limit in millicpu. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].resources.sync.limits.memory` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > [limits](#environments[].providers[].resources.sync.limits) > memory + +Memory limit in megabytes. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].resources.sync.requests` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > requests + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].resources.sync.requests.cpu` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > [requests](#environments[].providers[].resources.sync.requests) > cpu + +CPU request in millicpu. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].resources.sync.requests.memory` +[environments](#environments) > [providers](#environments[].providers[]) > [resources](#environments[].providers[].resources) > [sync](#environments[].providers[].resources.sync) > [requests](#environments[].providers[].resources.sync.requests) > memory + +Memory request in megabytes. 
+ +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].storage` +[environments](#environments) > [providers](#environments[].providers[]) > storage + +Storage parameters to set for the in-cluster builder, container registry and code sync persistent volumes (which are automatically installed and used when buildMode is 'cluster-docker' or 'kaniko'). + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].storage.builder` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > builder + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].storage.builder.size` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [builder](#environments[].providers[].storage.builder) > size + +Volume size for the registry in megabytes. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].storage.builder.storageClass` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [builder](#environments[].providers[].storage.builder) > storageClass + +Storage class to use for the volume. + +| Type | Required | +| ---- | -------- | +| `string` | No +### `environments[].providers[].storage.registry` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > registry + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].storage.registry.size` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [registry](#environments[].providers[].storage.registry) > size + +Volume size for the registry in megabytes. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].storage.registry.storageClass` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [registry](#environments[].providers[].storage.registry) > storageClass + +Storage class to use for the volume. + +| Type | Required | +| ---- | -------- | +| `string` | No +### `environments[].providers[].storage.sync` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > sync + + + +| Type | Required | +| ---- | -------- | +| `object` | No +### `environments[].providers[].storage.sync.size` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [sync](#environments[].providers[].storage.sync) > size + +Volume size for the registry in megabytes. + +| Type | Required | +| ---- | -------- | +| `number` | No +### `environments[].providers[].storage.sync.storageClass` +[environments](#environments) > [providers](#environments[].providers[]) > [storage](#environments[].providers[].storage) > [sync](#environments[].providers[].storage.sync) > storageClass + +Storage class to use for the volume. 
+ +| Type | Required | +| ---- | -------- | +| `string` | No ### `environments[].providers[].tlsCertificates[]` [environments](#environments) > [providers](#environments[].providers[]) > tlsCertificates @@ -618,20 +699,13 @@ variables: {} environments: - providers: - environments: - buildMode: local + buildMode: local-docker defaultHostname: defaultUsername: forceSsl: false imagePullSecrets: - name: namespace: default - storage: - builder: - size: 10240 - storageClass: null - registry: - size: 10240 - storageClass: null resources: builder: limits: @@ -647,6 +721,23 @@ environments: requests: cpu: 200 memory: 512 + sync: + limits: + cpu: 200 + memory: 256 + requests: + cpu: 100 + memory: 64 + storage: + builder: + size: 10240 + storageClass: null + registry: + size: 10240 + storageClass: null + sync: + size: 10240 + storageClass: null tlsCertificates: - name: hostnames: diff --git a/garden-service/package-lock.json b/garden-service/package-lock.json index e22305dcf6..0f05889886 100644 --- a/garden-service/package-lock.json +++ b/garden-service/package-lock.json @@ -1625,6 +1625,11 @@ "@types/request": "*" } }, + "@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" + }, "@types/semver": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/@types/semver/-/semver-5.5.0.tgz", @@ -10128,6 +10133,15 @@ "eventemitter3": "^3.1.0" } }, + "p-retry": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.1.0.tgz", + "integrity": "sha512-oepllyG9gX1qH4Sm20YAKxg1GA7L7puhvGnTfimi31P07zSIj7SDV6YtuAx9nbJF51DES+2CIIRkXs8GKqWJxA==", + "requires": { + "@types/retry": "^0.12.0", + "retry": "^0.12.0" + } + }, "p-timeout": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-2.0.1.tgz", @@ -11096,6 +11110,11 @@ "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", "dev": true }, + "retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", + "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=" + }, "rimraf": { "version": "2.6.3", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", diff --git a/garden-service/package.json b/garden-service/package.json index 7dc7d8b405..aac039e799 100644 --- a/garden-service/package.json +++ b/garden-service/package.json @@ -82,6 +82,7 @@ "node-forge": "^0.8.2", "normalize-url": "^4.3.0", "p-queue": "^5.0.0", + "p-retry": "^4.1.0", "path-is-inside": "^1.0.2", "request": "^2.88.0", "request-promise": "^4.2.4", diff --git a/garden-service/src/garden.ts b/garden-service/src/garden.ts index 01ac1948ce..13904b7075 100644 --- a/garden-service/src/garden.ts +++ b/garden-service/src/garden.ts @@ -40,7 +40,7 @@ import { platform, arch } from "os" import { LogEntry } from "./logger/log-entry" import { EventBus } from "./events" import { Watcher } from "./watch" -import { getIgnorer, Ignorer, getModulesPathsFromPath, getConfigFilePath } from "./util/fs" +import { getIgnorer, Ignorer, getModulesPathsFromPath, getConfigFilePath, getWorkingCopyId } from "./util/fs" import { Provider, ProviderConfig, getProviderDependencies } from "./config/provider" import { ResolveProviderTask } from "./tasks/resolve-provider" import { ActionHelper } from "./actions" @@ -82,6 +82,21 @@ interface ModuleConfigResolveOpts extends ContextResolveOpts { const 
asyncLock = new AsyncLock() +export interface GardenParams { + buildDir: BuildDir, + environmentName: string, + gardenDirPath: string, + ignorer: Ignorer, + opts: GardenOpts, + plugins: Plugins, + projectName: string, + projectRoot: string, + projectSources?: SourceConfig[], + providerConfigs: ProviderConfig[], + variables: PrimitiveMap, + workingCopyId: string, +} + export class Garden { public readonly log: LogEntry private readonly loadedPlugins: { [key: string]: GardenPlugin } @@ -100,19 +115,31 @@ export class Garden { private actionHelper: ActionHelper public readonly events: EventBus - constructor( - public readonly projectRoot: string, - public readonly projectName: string, - public readonly environmentName: string, - public readonly variables: PrimitiveMap, - public readonly projectSources: SourceConfig[] = [], - public readonly buildDir: BuildDir, - public readonly gardenDirPath: string, - public readonly ignorer: Ignorer, - public readonly opts: GardenOpts, - plugins: Plugins, - private readonly providerConfigs: ProviderConfig[], - ) { + public readonly projectRoot: string + public readonly projectName: string + public readonly environmentName: string + public readonly variables: PrimitiveMap + public readonly projectSources: SourceConfig[] + public readonly buildDir: BuildDir + public readonly gardenDirPath: string + public readonly ignorer: Ignorer + public readonly opts: GardenOpts + private readonly providerConfigs: ProviderConfig[] + public readonly workingCopyId: string + + constructor(params: GardenParams) { + this.buildDir = params.buildDir + this.environmentName = params.environmentName + this.gardenDirPath = params.gardenDirPath + this.ignorer = params.ignorer + this.opts = params.opts + this.projectName = params.projectName + this.projectRoot = params.projectRoot + this.projectSources = params.projectSources || [] + this.providerConfigs = params.providerConfigs + this.variables = params.variables + this.workingCopyId = params.workingCopyId + // make sure we're on a supported platform const currentPlatform = platform() const currentArch = arch() @@ -126,7 +153,7 @@ export class Garden { } this.modulesScanned = false - this.log = opts.log || getLogger().placeholder() + this.log = this.opts.log || getLogger().placeholder() // TODO: Support other VCS options. this.vcs = new GitHandler(this.gardenDirPath) this.configStore = new LocalConfigStore(this.gardenDirPath) @@ -143,7 +170,7 @@ export class Garden { this.watcher = new Watcher(this, this.log) // Register plugins - for (const [name, pluginFactory] of Object.entries({ ...builtinPlugins, ...plugins })) { + for (const [name, pluginFactory] of Object.entries({ ...builtinPlugins, ...params.plugins })) { // This cast is required for the linter to accept the instance type hackery. 
this.registerPlugin(name, pluginFactory) } @@ -183,8 +210,9 @@ export class Garden { gardenDirPath = resolve(projectRoot, gardenDirPath || DEFAULT_GARDEN_DIR_NAME) const buildDir = await BuildDir.factory(projectRoot, gardenDirPath) const ignorer = await getIgnorer(projectRoot, gardenDirPath) + const workingCopyId = await getWorkingCopyId(gardenDirPath) - const garden = new this( + const garden = new this({ projectRoot, projectName, environmentName, @@ -195,8 +223,9 @@ export class Garden { ignorer, opts, plugins, - providers, - ) as InstanceType + providerConfigs: providers, + workingCopyId, + }) as InstanceType return garden } diff --git a/garden-service/src/plugin-context.ts b/garden-service/src/plugin-context.ts index 24b1531609..666aad3a83 100644 --- a/garden-service/src/plugin-context.ts +++ b/garden-service/src/plugin-context.ts @@ -20,6 +20,7 @@ type WrappedFromGarden = Pick { @@ -69,5 +72,6 @@ export async function createPluginContext(garden: Garden, providerName: string): projectSources: cloneDeep(garden.projectSources), configStore: garden.configStore, provider, + workingCopyId: garden.workingCopyId, } } diff --git a/garden-service/src/plugins/container/build.ts b/garden-service/src/plugins/container/build.ts index d1284e0ef2..0c071f4168 100644 --- a/garden-service/src/plugins/container/build.ts +++ b/garden-service/src/plugins/container/build.ts @@ -53,15 +53,7 @@ export async function buildContainerModule({ module, log }: BuildModuleParams 0 ? name.split("/") : [] if (!tag) { diff --git a/garden-service/src/plugins/kubernetes/api.ts b/garden-service/src/plugins/kubernetes/api.ts index 55e75cb370..d4592d3b64 100644 --- a/garden-service/src/plugins/kubernetes/api.ts +++ b/garden-service/src/plugins/kubernetes/api.ts @@ -16,7 +16,6 @@ import { Apiextensions_v1beta1Api, V1Secret, Policy_v1beta1Api, - Storage_v1Api, CoreApi, ApisApi, V1APIGroup, @@ -78,7 +77,6 @@ const apiTypes: { [key: string]: K8sApiConstructor } = { extensions: Extensions_v1beta1Api, policy: Policy_v1beta1Api, rbac: RbacAuthorization_v1Api, - storage: Storage_v1Api, } const crudMap = { diff --git a/garden-service/src/plugins/kubernetes/config.ts b/garden-service/src/plugins/kubernetes/config.ts index fcfe122c51..512f5e4467 100644 --- a/garden-service/src/plugins/kubernetes/config.ts +++ b/garden-service/src/plugins/kubernetes/config.ts @@ -42,6 +42,7 @@ interface KubernetesResourceSpec { interface KubernetesResources { builder: KubernetesResourceSpec registry: KubernetesResourceSpec + sync: KubernetesResourceSpec } interface KubernetesStorageSpec { @@ -52,10 +53,13 @@ interface KubernetesStorageSpec { interface KubernetesStorage { builder: KubernetesStorageSpec registry: KubernetesStorageSpec + sync: KubernetesStorageSpec } +export type ContainerBuildMode = "local-docker" | "cluster-docker" | "kaniko" + export interface KubernetesBaseConfig extends ProviderConfig { - buildMode: string + buildMode: ContainerBuildMode context: string defaultHostname?: string defaultUsername?: string @@ -99,6 +103,16 @@ export const defaultResources: KubernetesResources = { memory: 512, }, }, + sync: { + limits: { + cpu: 200, + memory: 256, + }, + requests: { + cpu: 100, + memory: 64, + }, + }, } export const defaultStorage: KubernetesStorage = { @@ -110,6 +124,10 @@ export const defaultStorage: KubernetesStorage = { size: 10 * 1024, storageClass: null, }, + sync: { + size: 10 * 1024, + storageClass: null, + }, } const resourceSchema = (defaults: KubernetesResourceSpec) => Joi.object() @@ -178,7 +196,7 @@ const 
imagePullSecretsSchema = joiArray(secretRef) .description(dedent` References to \`docker-registry\` secrets to use for authenticating with remote registries when pulling images. This is necessary if you reference private images in your module configuration, and is required - when configuring a remote Kubernetes environment. + when configuring a remote Kubernetes environment with buildMode=local. `) const tlsCertificateSchema = Joi.object() @@ -202,15 +220,23 @@ const tlsCertificateSchema = Joi.object() export const kubernetesConfigBase = providerConfigBaseSchema .keys({ buildMode: Joi.string() - .allow("local", "cluster-docker") - .default("local") + .allow("local-docker", "cluster-docker", "kaniko") + .default("local-docker") .description(deline` Choose the mechanism used to build containers before deploying. By default it uses the local docker, but you - can set it to 'cluster-docker' to sync files to a remote docker daemon, installed in the cluster, and build - container images there. + can set it to 'cluster-docker' or 'kaniko' to sync files to a remote docker daemon, installed in the cluster, + and build container images there. This avoids the need to run Docker or Kubernetes locally, and allows you to + share layer and image caches between multiple developers, as well as between your development and CI workflows. + + This is currently experimental and sometimes not desired, so it's not enabled by default. For example when using + the \`local-kubernetes\` provider with Docker for Desktop and Minikube, we directly use the in-cluster docker + daemon when building. You might also be deploying to a remote cluster that isn't intended as a development + environment, so you'd want your builds to happen elsewhere. - This is currently experimental and sometimes not needed (e.g. with Docker for Desktop), so it's not enabled - by default. + Functionally, both 'cluster-docker' and 'kaniko' do the same thing, but use different underlying mechanisms + to build. The former uses a normal Docker daemon in the cluster. Because this has to run in privileged mode, + this is less secure than Kaniko, but in turn it is generally faster. See the + [Kaniko docs](https://github.com/GoogleContainerTools/kaniko) for more information. `), defaultHostname: Joi.string() .description("A default hostname to use when no hostname is explicitly configured for a service.") @@ -224,25 +250,27 @@ export const kubernetesConfigBase = providerConfigBaseSchema "is available for a configured hostname.", ), imagePullSecrets: imagePullSecretsSchema, - storage: Joi.object() - .keys({ - builder: storageSchema(defaultStorage.builder), - registry: storageSchema(defaultStorage.registry), - }) - .default(defaultStorage) - .description(deline` - Storage parameters to set for the in-cluster builder and container registry persistent volume - (which are automatically installed and used when buildMode=cluster-docker). - `), resources: Joi.object() .keys({ builder: resourceSchema(defaultResources.builder), registry: resourceSchema(defaultResources.registry), + sync: resourceSchema(defaultResources.sync), }) .default(defaultResources) .description(deline` Resource requests and limits for the in-cluster builder and container registry - (which are automatically installed and used when buildMode=cluster-docker). + (which are automatically installed and used when buildMode is 'cluster-docker' or 'kaniko'). 
+ `), + storage: Joi.object() + .keys({ + builder: storageSchema(defaultStorage.builder), + registry: storageSchema(defaultStorage.registry), + sync: storageSchema(defaultStorage.sync), + }) + .default(defaultStorage) + .description(deline` + Storage parameters to set for the in-cluster builder, container registry and code sync persistent volumes + (which are automatically installed and used when buildMode is 'cluster-docker' or 'kaniko'). `), tlsCertificates: joiArray(tlsCertificateSchema) .unique("name") diff --git a/garden-service/src/plugins/kubernetes/container/build.ts b/garden-service/src/plugins/kubernetes/container/build.ts index 4f7ed9730e..11956050a6 100644 --- a/garden-service/src/plugins/kubernetes/container/build.ts +++ b/garden-service/src/plugins/kubernetes/container/build.ts @@ -6,86 +6,122 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +import pRetry from "p-retry" import { ContainerModule } from "../../container/config" import { containerHelpers } from "../../container/helpers" -import { buildContainerModule, getContainerBuildStatus } from "../../container/build" +import { buildContainerModule, getContainerBuildStatus, getDockerBuildFlags } from "../../container/build" import { GetBuildStatusParams, BuildStatus } from "../../../types/plugin/module/getBuildStatus" import { BuildModuleParams, BuildResult } from "../../../types/plugin/module/build" -import { getPortForward, getPods } from "../util" +import { getPortForward, getPods, millicpuToString, megabytesToString } from "../util" import { systemNamespace } from "../system" import { RSYNC_PORT } from "../constants" import execa = require("execa") import { posix, resolve } from "path" import { KubeApi } from "../api" import { kubectl } from "../kubectl" -import { ConfigurationError } from "../../../exceptions" import { LogEntry } from "../../../logger/log-entry" -import { KubernetesProvider } from "../config" - -const builderDeployment = "garden-docker-daemon" +import { KubernetesProvider, ContainerBuildMode } from "../config" +import { PluginError } from "../../../exceptions" +import axios from "axios" +import { runPod } from "../run" +import { getRegistryHostname } from "../init" + +const dockerDaemonDeploymentName = "garden-docker-daemon" +const dockerDaemonContainerName = "docker-daemon" +// TODO: make build timeout configurable +const buildTimeout = 600 +// Note: v0.9.0 appears to be completely broken: https://github.com/GoogleContainerTools/kaniko/issues/268 +const kanikoImage = "gcr.io/kaniko-project/executor:v0.8.0" +const registryDeploymentName = "garden-docker-registry" +const registryPort = 5000 +const syncDataVolumeName = "garden-build-sync" +const syncDeploymentName = "garden-build-sync" export async function k8sGetContainerBuildStatus( params: GetBuildStatusParams, ): Promise { - const { ctx } = params + const { ctx, module } = params const provider = ctx.provider - if (provider.config.buildMode === "local") { - const status = await getContainerBuildStatus(params) - - if (ctx.provider.config.deploymentRegistry) { - // TODO: Check if the image exists in the remote registry - } - return status - - } else if (provider.config.buildMode === "cluster-docker") { - return getContainerBuildStatusCluster(params) + const hasDockerfile = await containerHelpers.hasDockerfile(module) - } else { - throw invalidBuildMode(provider) + if (!hasDockerfile) { + // Nothing to build + return { ready: true } } + + const handler = buildStatusHandlers[provider.config.buildMode] + return handler(params) } 
export async function k8sBuildContainer(params: BuildModuleParams): Promise { const { ctx } = params const provider = ctx.provider + const handler = buildHandlers[provider.config.buildMode] + return handler(params) +} - if (provider.config.buildMode === "local") { - return buildContainerLocal(params) +type BuildStatusHandler = (params: GetBuildStatusParams) => Promise - } else if (provider.config.buildMode === "cluster-docker") { - return buildContainerCluster(params) +const getLocalBuildStatus: BuildStatusHandler = async (params) => { + const { ctx } = params + const status = await getContainerBuildStatus(params) - } else { - throw invalidBuildMode(provider) + if (ctx.provider.config.deploymentRegistry) { + // TODO: Check if the image exists in the remote registry + // Note: Waiting for the `docker registry ls` command to be available in Docker 19.03. Otherwise we'll need to + // attempt to handle all kinds of authentication cases. } + + return status } -async function getContainerBuildStatusCluster(params: GetBuildStatusParams) { +const getRemoteBuildStatus: BuildStatusHandler = async (params) => { const { ctx, module, log } = params const provider = ctx.provider - const hasDockerfile = await containerHelpers.hasDockerfile(module) - - if (!hasDockerfile) { - return { ready: true } - } - - const deploymentImage = await containerHelpers.getDeploymentImageId(module, provider.config.deploymentRegistry) + const registryFwd = await getPortForward({ + ctx, + log, + namespace: systemNamespace, + targetDeployment: `Deployment/${registryDeploymentName}`, + port: registryPort, + }) - const args = ["docker", "images", "-q", deploymentImage] - const res = await execInBuilder(provider, log, args, 30) + const imageId = await containerHelpers.getDeploymentImageId(module, provider.config.deploymentRegistry) + const imageName = containerHelpers.unparseImageId({ + ...containerHelpers.parseImageId(imageId), + host: undefined, + tag: undefined, + }) - const checkLog = res.stdout + res.stderr - log.silly(checkLog) + const url = `http://localhost:${registryFwd.localPort}/v2/${imageName}/manifests/${module.version.versionString}` - // The `docker images -q ` command returns an ID if the image exists, otherwise it returns an empty string - const ready = checkLog.trim().length > 0 + try { + const res = await axios({ url }) + log.silly(res.data) + return { ready: true } + } catch (err) { + if (err.response && err.response.status === 404) { + return { ready: false } + } else { + throw new PluginError(`Could not query in-cluster registry: ${err}`, { + message: err.message, + response: err.response, + }) + } + } +} - return { ready } +const buildStatusHandlers: { [mode in ContainerBuildMode]: BuildStatusHandler } = { + "local-docker": getLocalBuildStatus, + "cluster-docker": getRemoteBuildStatus, + "kaniko": getRemoteBuildStatus, } -async function buildContainerLocal(params: BuildModuleParams) { +type BuildHandler = (params: BuildModuleParams) => Promise + +const localBuild: BuildHandler = async (params) => { const { ctx, module, log } = params const buildResult = await buildContainerModule(params) @@ -108,68 +144,92 @@ async function buildContainerLocal(params: BuildModuleParams) { return buildResult } -async function buildContainerCluster(params: BuildModuleParams) { +const remoteBuild: BuildHandler = async (params) => { const { ctx, module, log } = params const provider = ctx.provider - const hasDockerfile = await containerHelpers.hasDockerfile(module) - - if (!hasDockerfile) { - log.setState("Nothing to build") - 
- return { - fetched: true, - fresh: false, - version: module.version.versionString, - } - } - // Sync the build context to the remote sync service // -> Get a tunnel to the service log.setState("Syncing sources to cluster...") - const syncFwd = await getPortForward(ctx, log, systemNamespace, `Deployment/${builderDeployment}`, RSYNC_PORT) + const syncFwd = await getPortForward({ + ctx, + log, + namespace: systemNamespace, + targetDeployment: `Deployment/${syncDeploymentName}`, + port: RSYNC_PORT, + }) // -> Run rsync const buildRoot = resolve(module.buildPath, "..") // This trick is used to automatically create the correct target directory with rsync: // https://stackoverflow.com/questions/1636889/rsync-how-can-i-configure-it-to-create-target-directory-on-server const src = `${buildRoot}/./${module.name}/` - const destination = `rsync://localhost:${syncFwd.localPort}/volume/` + const destination = `rsync://localhost:${syncFwd.localPort}/volume/${ctx.workingCopyId}/` log.debug(`Syncing from ${src} to ${destination}`) - // TODO: use list of files from module version - await execa("rsync", ["-vrpztgo", "--relative", src, destination]) - // Execute the build + // We retry a couple of times, because we may get intermittent connection issues or concurrency issues + await pRetry( + () => execa("rsync", ["-vrpztgo", "--relative", "--delete", src, destination]), + { retries: 3, minTimeout: 500 }, + ) + const localId = await containerHelpers.getLocalImageId(module) const deploymentImageId = await containerHelpers.getDeploymentImageId(module, provider.config.deploymentRegistry) + const dockerfile = module.spec.dockerfile || "Dockerfile" + + // Because we're syncing to a shared volume, we need to scope by a unique ID + const contextPath = `/garden-build/${ctx.workingCopyId}/${module.name}/` log.setState(`Building image ${localId}...`) - // Prepare the build command - const dockerfile = module.spec.dockerfile || "Dockerfile" - const contextPath = `/garden-build/${module.name}` - const dockerfilePath = posix.join(contextPath, dockerfile) + let buildLog = "" - const buildArgs = [ - "docker", "build", - "-t", deploymentImageId, - "-f", dockerfilePath, - `/garden-build/${module.name}`, - ] + if (provider.config.buildMode === "cluster-docker") { + // Prepare the build command + const dockerfilePath = posix.join(contextPath, dockerfile) - const buildRes = await execInBuilder(provider, log, buildArgs, 600) + const args = [ + "docker", "build", + "-t", deploymentImageId, + "-f", dockerfilePath, + contextPath, + ...getDockerBuildFlags(module), + ] - const buildLog = buildRes.stdout + buildRes.stderr - log.silly(buildLog) + // Execute the build + const podName = await getBuilderPodName(provider, log) + const buildRes = await execInBuilder({ provider, log, args, timeout: buildTimeout, podName }) + buildLog = buildRes.stdout + buildRes.stderr + + // Push the image to the registry + log.setState({ msg: `Pushing image ${localId} to registry...` }) - // Push the image to the registry - log.setState({ msg: `Pushing image ${localId} to registry...` }) + const dockerCmd = ["docker", "push", deploymentImageId] + const pushArgs = ["/bin/sh", "-c", dockerCmd.join(" ")] - const dockerCmd = ["docker", "push", deploymentImageId] - const pushArgs = ["/bin/sh", "-c", dockerCmd.join(" ")] + const pushRes = await execInBuilder({ provider, log, args: pushArgs, timeout: 300, podName }) + buildLog += pushRes.stdout + pushRes.stderr + + } else { + // build with Kaniko + const args = [ + "executor", + "--context", "dir://" + 
contextPath, + "--dockerfile", dockerfile, + "--destination", deploymentImageId, + "--cache=true", + "--insecure", // The in-cluster registry is not exposed, so we don't configure TLS on it. + // "--verbosity", "debug", + ...getDockerBuildFlags(module), + ] + + // Execute the build + const buildRes = await runKaniko(provider, log, module, args) + buildLog = buildRes.output + } - await execInBuilder(provider, log, pushArgs, 300) + log.silly(buildLog) return { buildLog, @@ -179,33 +239,126 @@ async function buildContainerCluster(params: BuildModuleParams) } } +interface BuilderExecParams { + provider: KubernetesProvider, + log: LogEntry, + args: string[], + timeout: number, + podName: string, +} + +const buildHandlers: { [mode in ContainerBuildMode]: BuildHandler } = { + "local-docker": localBuild, + "cluster-docker": remoteBuild, + "kaniko": remoteBuild, +} + // TODO: we should make a simple service around this instead of execing into containers -async function execInBuilder(provider: KubernetesProvider, log: LogEntry, args: string[], timeout: number) { - const api = await KubeApi.factory(log, provider.config.context) - const builderDockerPodName = await getBuilderPodName(api) +async function execInBuilder({ provider, log, args, timeout }: BuilderExecParams) { + const podName = await getBuilderPodName(provider, log) - const execCmd = ["exec", "-i", builderDockerPodName, "-c", "docker-daemon", "--", ...args] + const execCmd = ["exec", "-i", podName, "-c", dockerDaemonContainerName, "--", ...args] log.verbose(`Running: kubectl ${execCmd.join(" ")}`) return kubectl.exec({ args: execCmd, - context: api.context, + context: provider.config.context, log, namespace: systemNamespace, timeout, }) } -async function getBuilderPodName(api: KubeApi) { - const builderStatusRes = await api.apps.readNamespacedDeployment(builderDeployment, systemNamespace) +async function getBuilderPodName(provider: KubernetesProvider, log: LogEntry) { + const api = await KubeApi.factory(log, provider.config.context) + + const builderStatusRes = await api.apps.readNamespacedDeployment(dockerDaemonDeploymentName, systemNamespace) const builderPods = await getPods(api, systemNamespace, builderStatusRes.body.spec.selector.matchLabels) + const pod = builderPods[0] + + if (!pod) { + throw new PluginError(`Could not find running image builder`, { + builderDeploymentName: dockerDaemonDeploymentName, + systemNamespace, + }) + } + return builderPods[0].metadata.name } -function invalidBuildMode(provider: KubernetesProvider) { - return new ConfigurationError( - `kubernetes: Invalid build mode '${provider.config.buildMode}'`, - { config: provider.config }, - ) +async function runKaniko(provider: KubernetesProvider, log: LogEntry, module: ContainerModule, args: string[]) { + const podName = `kaniko-${module.name}-${Math.round(new Date().getTime())}` + const registryHostname = getRegistryHostname() + + return runPod({ + args, + context: provider.config.context, + envVars: {}, + ignoreError: false, + image: kanikoImage, + interactive: false, + module, + namespace: systemNamespace, + log, + overrides: { + metadata: { + // Workaround to make sure sidecars are not injected, + // due to https://github.com/kubernetes/kubernetes/issues/25908 + annotations: { "sidecar.istio.io/inject": "false" }, + }, + spec: { + shareProcessNamespace: true, + containers: [ + { + name: "kaniko", + image: kanikoImage, + args, + volumeMounts: [{ + name: syncDataVolumeName, + mountPath: "/garden-build", + }], + resources: { + limits: { + cpu: 
millicpuToString(provider.config.resources.builder.limits.cpu), + memory: megabytesToString(provider.config.resources.builder.limits.memory), + }, + requests: { + cpu: millicpuToString(provider.config.resources.builder.requests.cpu), + memory: megabytesToString(provider.config.resources.builder.requests.memory), + }, + }, + }, + { + name: "proxy", + image: "basi/socat:v0.1.0", + command: ["/bin/sh", "-c", `socat TCP-LISTEN:5000,fork TCP:${registryHostname}:5000 || exit 0`], + ports: [{ + name: "proxy", + containerPort: registryPort, + protocol: "TCP", + }], + readinessProbe: { + tcpSocket: { port: registryPort }, + }, + }, + // This is a little workaround so that the socat proxy doesn't just keep running after the build finishes. + { + name: "killer", + image: "busybox", + command: [ + "sh", "-c", + "while true; do if pidof executor > /dev/null; then sleep 0.5; else killall socat; exit 0; fi done", + ], + }, + ], + volumes: [{ + name: syncDataVolumeName, + persistentVolumeClaim: { claimName: syncDataVolumeName }, + }], + }, + }, + podName, + timeout: buildTimeout, + }) } diff --git a/garden-service/src/plugins/kubernetes/hot-reload.ts b/garden-service/src/plugins/kubernetes/hot-reload.ts index 7657a4e73b..acbe4c0bc4 100644 --- a/garden-service/src/plugins/kubernetes/hot-reload.ts +++ b/garden-service/src/plugins/kubernetes/hot-reload.ts @@ -239,7 +239,7 @@ export async function syncToService( const namespace = await getAppNamespace(ctx, log, ctx.provider) try { - const portForward = await getPortForward(ctx, log, namespace, targetDeployment, RSYNC_PORT) + const portForward = await getPortForward({ ctx, log, namespace, targetDeployment, port: RSYNC_PORT }) return Bluebird.map(hotReloadSpec.sync, ({ source, target }) => { const src = rsyncSourcePath(service.sourceModule.path, source) diff --git a/garden-service/src/plugins/kubernetes/init.ts b/garden-service/src/plugins/kubernetes/init.ts index 288e605aa7..0f95db9867 100644 --- a/garden-service/src/plugins/kubernetes/init.ts +++ b/garden-service/src/plugins/kubernetes/init.ts @@ -85,10 +85,11 @@ export async function getEnvironmentStatus({ ctx, log }: GetEnvironmentStatusPar systemReady = systemTillerReady && sysNamespaceUpToDate && systemServiceStatus.state === "ready" dashboardPages = systemServiceStatus.dashboardPages - // If we require manual init and system services are outdated (as opposed to unhealthy, missing etc.), we warn + // If we require manual init and system services are outdated but none are *missing*, we warn // in the prepareEnvironment handler, instead of flagging as not ready here. This avoids blocking users where // there's variance in configuration between users of the same cluster, that most likely shouldn't affect usage. 
- if (needManualInit && systemServiceStatus.state === "outdated") { + const states = Object.values(systemServiceStatus.serviceStatuses).map(s => s.state) + if (needManualInit && systemServiceStatus.state === "outdated" && !states.includes("missing")) { needManualInit = false } @@ -178,22 +179,33 @@ export async function cleanupEnvironment({ ctx, log }: CleanupEnvironmentParams) function getVariables(config: KubernetesConfig) { return { "namespace": systemNamespace, + "registry-hostname": getRegistryHostname(), + "builder-mode": config.buildMode, + "builder-limits-cpu": millicpuToString(config.resources.builder.limits.cpu), "builder-limits-memory": megabytesToString(config.resources.builder.limits.memory), "builder-requests-cpu": millicpuToString(config.resources.builder.requests.cpu), "builder-requests-memory": megabytesToString(config.resources.builder.requests.memory), "builder-storage-size": megabytesToString(config.storage.builder.size), "builder-storage-class": config.storage.builder.storageClass, + "registry-limits-cpu": millicpuToString(config.resources.registry.limits.cpu), "registry-limits-memory": megabytesToString(config.resources.registry.limits.memory), "registry-requests-cpu": millicpuToString(config.resources.registry.requests.cpu), "registry-requests-memory": megabytesToString(config.resources.registry.requests.memory), "registry-storage-size": megabytesToString(config.storage.registry.size), "registry-storage-class": config.storage.registry.storageClass, + + "sync-limits-cpu": millicpuToString(config.resources.sync.limits.cpu), + "sync-limits-memory": megabytesToString(config.resources.sync.limits.memory), + "sync-requests-cpu": millicpuToString(config.resources.sync.requests.cpu), + "sync-requests-memory": megabytesToString(config.resources.sync.requests.memory), + "sync-storage-size": megabytesToString(config.storage.sync.size), + "sync-storage-class": config.storage.sync.storageClass, } } -function getRegistryHostname() { +export function getRegistryHostname() { return `garden-docker-registry.${systemNamespace}.svc.cluster.local` } diff --git a/garden-service/src/plugins/kubernetes/kubernetes.ts b/garden-service/src/plugins/kubernetes/kubernetes.ts index 0fab7b0f9d..63760b84e5 100644 --- a/garden-service/src/plugins/kubernetes/kubernetes.ts +++ b/garden-service/src/plugins/kubernetes/kubernetes.ts @@ -36,17 +36,21 @@ export async function configureProvider({ projectName, config }: ConfigureProvid config._systemServices.push("ingress-controller", "default-backend") } - if (config.buildMode === "cluster-docker") { + if (config.buildMode === "cluster-docker" || config.buildMode === "kaniko") { + // TODO: support external registry // This is a special configuration, used in combination with the registry-proxy service, // to make sure every node in the cluster can resolve the image from the registry we deploy in-cluster. 
config.deploymentRegistry = { hostname: `127.0.0.1:5000`, - // The base configure handler ensures that the namespace is set - namespace: config.namespace!, + namespace: config.namespace, } // Deploy build services on init - config._systemServices.push("docker-daemon", "docker-registry", "registry-proxy") + config._systemServices.push("build-sync", "docker-registry", "registry-proxy", "nfs-provisioner") + + if (config.buildMode === "cluster-docker") { + config._systemServices.push("docker-daemon") + } } else if (config.name !== "local-kubernetes" && !config.deploymentRegistry) { throw new ConfigurationError( diff --git a/garden-service/src/plugins/kubernetes/run.ts b/garden-service/src/plugins/kubernetes/run.ts index a05e5cdf88..e4e9f86fcc 100644 --- a/garden-service/src/plugins/kubernetes/run.ts +++ b/garden-service/src/plugins/kubernetes/run.ts @@ -14,24 +14,35 @@ import { LogEntry } from "../../logger/log-entry" interface RunPodParams { context: string, - namespace: string, - module: Module, image: string, envVars: PrimitiveMap, command?: string[], args: string[], interactive: boolean, ignoreError: boolean, - timeout?: number, - overrides?: any, log: LogEntry, + module: Module, + namespace: string, + overrides?: any, + podName?: string, + timeout?: number, } export async function runPod( { - context, namespace, module, image, envVars, - command, args, interactive, ignoreError, - timeout, overrides, log, + args, + command, + context, + envVars, + ignoreError, + image, + interactive, + log, + module, + namespace, + overrides, + podName, + timeout, }: RunPodParams, ): Promise { const envArgs = Object.entries(envVars).map(([k, v]) => `--env=${k}=${v}`) @@ -60,7 +71,8 @@ export async function runPod( } const kubecmd = [ - "run", `run-${module.name}-${Math.round(new Date().getTime())}`, + "run", + podName || `run-${module.name}-${Math.round(new Date().getTime())}`, ...opts, ...envArgs, "--", diff --git a/garden-service/src/plugins/kubernetes/status.ts b/garden-service/src/plugins/kubernetes/status.ts index 537d3be911..58a43b93f1 100644 --- a/garden-service/src/plugins/kubernetes/status.ts +++ b/garden-service/src/plugins/kubernetes/status.ts @@ -606,13 +606,14 @@ function removeNull(value: T | Iterable): T | Iterable | { [K in keyof async function getPodLogs(api: KubeApi, namespace: string, podNames: string[]): Promise { const allLogs = await Bluebird.map(podNames, async (name) => { let containerName: string | undefined + try { const podRes = await api.core.readNamespacedPod(name, namespace) const containerNames = podRes.body.spec.containers.map(c => c.name) if (containerNames.length > 1) { - containerName = containerNames.filter(n => !n.match(/garden-/))[0] + containerName = containerNames.filter(n => !n.match(/garden-/))[0] || containerNames[0] } else { - containerName = undefined + containerName = containerNames[0] } } catch (err) { if (err.code === 404) { @@ -621,6 +622,7 @@ async function getPodLogs(api: KubeApi, namespace: string, podNames: string[]): throw err } } + // Putting 5000 bytes as a length limit in addition to the line limit, just as a precaution in case someone // accidentally logs a binary file or something. 
try { diff --git a/garden-service/src/plugins/kubernetes/util.ts b/garden-service/src/plugins/kubernetes/util.ts index 8e1814284d..714d121301 100644 --- a/garden-service/src/plugins/kubernetes/util.ts +++ b/garden-service/src/plugins/kubernetes/util.ts @@ -11,6 +11,7 @@ import { get, flatten, uniqBy } from "lodash" import { ChildProcess } from "child_process" import { V1Pod } from "@kubernetes/client-node" import getPort = require("get-port") +const AsyncLock = require("async-lock") import { KubernetesResource } from "./types" import { splitLast } from "../../util/util" @@ -105,6 +106,7 @@ export interface PortForward { } const registeredPortForwards: { [key: string]: PortForward } = {} +const portForwardRegistrationLock = new AsyncLock() registerCleanupFunction("kill-port-forward-procs", () => { for (const { proc } of Object.values(registeredPortForwards)) { @@ -113,49 +115,54 @@ registerCleanupFunction("kill-port-forward-procs", () => { }) export async function getPortForward( - ctx: PluginContext, log: LogEntry, namespace: string, targetDeployment: string, port: number, + { ctx, log, namespace, targetDeployment, port }: + { ctx: PluginContext, log: LogEntry, namespace: string, targetDeployment: string, port: number }, ): Promise { - let localPort: number - + // Using lock here to avoid concurrency issues (multiple parallel requests for same forward). const key = `${targetDeployment}:${port}` - const registered = registeredPortForwards[key] - if (registered && !registered.proc.killed) { - log.debug(`Reusing local port ${registered.localPort} for ${targetDeployment} container`) - return registered - } + return portForwardRegistrationLock.acquire("register-port-forward", (async () => { + let localPort: number - const k8sCtx = ctx + const registered = registeredPortForwards[key] - // Forward random free local port to the remote rsync container. - localPort = await getPort() - const portMapping = `${localPort}:${port}` + if (registered && !registered.proc.killed) { + log.debug(`Reusing local port ${registered.localPort} for ${targetDeployment} container`) + return registered + } - log.debug(`Forwarding local port ${localPort} to ${targetDeployment} container port ${port}`) + const k8sCtx = ctx - // TODO: use the API directly instead of kubectl (need to reverse engineer kubectl a bit to get how that works) - const portForwardArgs = ["port-forward", targetDeployment, portMapping] - log.silly(`Running 'kubectl ${portForwardArgs.join(" ")}'`) + // Forward random free local port to the remote rsync container. + localPort = await getPort() + const portMapping = `${localPort}:${port}` - const proc = await kubectl.spawn({ log, context: k8sCtx.provider.config.context, namespace, args: portForwardArgs }) + log.debug(`Forwarding local port ${localPort} to ${targetDeployment} container port ${port}`) - return new Promise((resolve) => { - proc.on("error", (error) => { - !proc.killed && proc.kill() - throw error - }) + // TODO: use the API directly instead of kubectl (need to reverse engineer kubectl a bit to get how that works) + const portForwardArgs = ["port-forward", targetDeployment, portMapping] + log.silly(`Running 'kubectl ${portForwardArgs.join(" ")}'`) - proc.stdout!.on("data", (line) => { - // This is unfortunately the best indication that we have that the connection is up... 
- log.silly(`[${targetDeployment} port forwarder] ${line}`) + const proc = await kubectl.spawn({ log, context: k8sCtx.provider.config.context, namespace, args: portForwardArgs }) - if (line.toString().includes("Forwarding from ")) { - const portForward = { proc, localPort } - registeredPortForwards[key] = portForward - resolve(portForward) - } + return new Promise((resolve) => { + proc.on("error", (error) => { + !proc.killed && proc.kill() + throw error + }) + + proc.stdout!.on("data", (line) => { + // This is unfortunately the best indication that we have that the connection is up... + log.silly(`[${targetDeployment} port forwarder] ${line}`) + + if (line.toString().includes("Forwarding from ")) { + const portForward = { proc, localPort } + registeredPortForwards[key] = portForward + resolve(portForward) + } + }) }) - }) + })) } /** diff --git a/garden-service/src/util/fs.ts b/garden-service/src/util/fs.ts index 35b2e64ae2..b6493a0d5e 100644 --- a/garden-service/src/util/fs.ts +++ b/garden-service/src/util/fs.ts @@ -8,16 +8,18 @@ import klaw = require("klaw") import * as _spawn from "cross-spawn" -import { pathExists, readFile } from "fs-extra" import * as Bluebird from "bluebird" +import { pathExists, readFile, writeFile } from "fs-extra" import minimatch = require("minimatch") import { some } from "lodash" +import * as uuid from "uuid" import { join, basename, win32, posix, relative, parse } from "path" import { ValidationError } from "../exceptions" // NOTE: Importing from ignore/ignore doesn't work on Windows const ignore = require("ignore") const VALID_CONFIG_FILENAMES = ["garden.yml", "garden.yaml"] +const metadataFilename = "metadata.json" /* Warning: Don't make any async calls in the loop body when using this function, since this may cause @@ -191,3 +193,29 @@ export function toCygwinPath(path: string) { export function matchGlobs(path: string, patterns: string[]): boolean { return some(patterns, pattern => minimatch(path, pattern)) } + +/** + * Gets an ID for the current working copy, given the path to the project's `.garden` directory. + * We do this by storing a `metadata` file in the directory with an ID. The file is created on demand and a new + * ID is set when it is first generated. + * + * The implication is that removing the `.garden` directory resets the ID, so any remote data attached to the ID + * will be orphaned. Which is usually not a big issue, but something to be mindful of. 
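+ *
+ * Example (illustrative): repeated calls against the same `.garden` directory return the same ID:
+ *
+ *   const idA = await getWorkingCopyId(join(projectRoot, ".garden"))
+ *   const idB = await getWorkingCopyId(join(projectRoot, ".garden"))
+ *   // idA === idB, until the directory (and the metadata.json inside it) is removed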
+ */ +export async function getWorkingCopyId(gardenDirPath: string) { + const metadataPath = join(gardenDirPath, metadataFilename) + + let metadata = { + workingCopyId: uuid.v4(), + } + + // TODO: do this in a fully concurrency-safe way + if (await pathExists(metadataPath)) { + const metadataContent = await readFile(metadataPath) + metadata = JSON.parse(metadataContent.toString()) + } else { + await writeFile(metadataPath, JSON.stringify(metadata)) + } + + return metadata.workingCopyId +} diff --git a/garden-service/static/kubernetes/system/build-sync/Chart.yaml b/garden-service/static/kubernetes/system/build-sync/Chart.yaml new file mode 100644 index 0000000000..f0fe852da0 --- /dev/null +++ b/garden-service/static/kubernetes/system/build-sync/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: garden-build-sync +version: 0.1.0 diff --git a/garden-service/static/kubernetes/system/build-sync/garden.yml b/garden-service/static/kubernetes/system/build-sync/garden.yml new file mode 100644 index 0000000000..faa2c5f897 --- /dev/null +++ b/garden-service/static/kubernetes/system/build-sync/garden.yml @@ -0,0 +1,18 @@ +kind: Module +type: helm +name: build-sync +description: Sync service for receiving build context ahead of in-cluster builds +releaseName: garden-build-sync +dependencies: + - nfs-provisioner +values: + resources: + limits: + cpu: ${var.sync-limits-cpu} + memory: ${var.sync-limits-memory} + requests: + cpu: ${var.sync-requests-cpu} + memory: ${var.sync-requests-memory} + storage: + request: ${var.sync-storage-size} + storageClass: garden-system-nfs diff --git a/garden-service/static/kubernetes/system/build-sync/templates/NOTES.txt b/garden-service/static/kubernetes/system/build-sync/templates/NOTES.txt new file mode 100644 index 0000000000..41efc675ff --- /dev/null +++ b/garden-service/static/kubernetes/system/build-sync/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "garden-build-sync.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "garden-build-sync.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "garden-build-sync.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "garden-build-sync.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/garden-service/static/kubernetes/system/build-sync/templates/_helpers.tpl b/garden-service/static/kubernetes/system/build-sync/templates/_helpers.tpl new file mode 100644 index 0000000000..09fed90869 --- /dev/null +++ b/garden-service/static/kubernetes/system/build-sync/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "garden-build-sync.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "garden-build-sync.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "garden-build-sync.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/garden-service/static/kubernetes/system/build-sync/templates/deployment.yaml b/garden-service/static/kubernetes/system/build-sync/templates/deployment.yaml new file mode 100644 index 0000000000..1cfc1673b1 --- /dev/null +++ b/garden-service/static/kubernetes/system/build-sync/templates/deployment.yaml @@ -0,0 +1,64 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "garden-build-sync.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "garden-build-sync.name" . }} + helm.sh/chart: {{ include "garden-build-sync.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/name: {{ include "garden-build-sync.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "garden-build-sync.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + volumes: + - name: garden-build-sync + persistentVolumeClaim: + claimName: garden-build-sync + containers: + - name: sync + image: "eugenmayer/rsync:latest" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: rsync + containerPort: 873 + protocol: TCP + livenessProbe: + tcpSocket: + port: 873 + readinessProbe: + tcpSocket: + port: 873 + volumeMounts: + - mountPath: /data + name: garden-build-sync + env: + # The service is not exposed at all outside the cluster, so this should be all good. + - name: ALLOW + value: "0.0.0.0/0" + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/garden-service/static/kubernetes/system/build-sync/templates/volume.yaml b/garden-service/static/kubernetes/system/build-sync/templates/volume.yaml new file mode 100644 index 0000000000..cbc9b1c472 --- /dev/null +++ b/garden-service/static/kubernetes/system/build-sync/templates/volume.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: garden-build-sync +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: {{ .Values.storage.request }} +{{- if .Values.storage.storageClass }} +{{- if (eq "-" .Values.storage.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.storage.storageClass }}" +{{- end }} +{{- end }} \ No newline at end of file diff --git a/garden-service/static/kubernetes/system/build-sync/values.yaml b/garden-service/static/kubernetes/system/build-sync/values.yaml new file mode 100644 index 0000000000..74e86209ef --- /dev/null +++ b/garden-service/static/kubernetes/system/build-sync/values.yaml @@ -0,0 +1,29 @@ +# Default values for garden-build-sync. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + pullPolicy: IfNotPresent + +nameOverride: "garden-build-sync" +fullnameOverride: "garden-build-sync" + +service: + type: ClusterIP + port: 2375 + +resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 100m + memory: 128Mi +storage: + request: 2Gi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/garden-service/static/kubernetes/system/docker-daemon/garden.yml b/garden-service/static/kubernetes/system/docker-daemon/garden.yml index 0097aeede2..a0a4df3245 100644 --- a/garden-service/static/kubernetes/system/docker-daemon/garden.yml +++ b/garden-service/static/kubernetes/system/docker-daemon/garden.yml @@ -3,21 +3,20 @@ type: helm name: docker-daemon description: Docker daemon used for in-cluster building releaseName: garden-docker-daemon +dependencies: + - build-sync values: - dockerDaemon: - resources: - limits: - cpu: ${var.builder-limits-cpu} - memory: ${var.builder-limits-memory} - requests: - cpu: ${var.builder-requests-cpu} - memory: ${var.builder-requests-memory} - registry: - hostname: ${var.registry-hostname || "foo"} - # tlsSecretName: ${variables.registry-tls-secret-name} - storage: - size: ${var.builder-storage-size} - storageClass: ${var.builder-storage-class} - sync: - storage: - storageClass: ${var.builder-storage-class} + mode: ${var.builder-mode} + resources: + limits: + cpu: ${var.builder-limits-cpu} + memory: ${var.builder-limits-memory} + requests: + cpu: ${var.builder-requests-cpu} + memory: ${var.builder-requests-memory} + registry: + hostname: ${var.registry-hostname || "foo"} + # tlsSecretName: ${variables.registry-tls-secret-name} + storage: + size: ${var.builder-storage-size} + storageClass: ${var.builder-storage-class} diff --git a/garden-service/static/kubernetes/system/docker-daemon/templates/deployment.yaml b/garden-service/static/kubernetes/system/docker-daemon/templates/deployment.yaml index 366d9ab582..2519987684 100644 --- a/garden-service/static/kubernetes/system/docker-daemon/templates/deployment.yaml +++ b/garden-service/static/kubernetes/system/docker-daemon/templates/deployment.yaml @@ -26,12 +26,12 @@ spec: - name: garden-docker-data persistentVolumeClaim: claimName: garden-docker-data - - name: garden-build-sync-data + - name: garden-build-sync 
persistentVolumeClaim: - claimName: garden-build-sync-data + claimName: garden-build-sync # - name: garden-registry-tls # secret: - # secretName: {{ .Values.dockerDaemon.registry.tlsSecretName }} + # secretName: foo # items: # - key: tls.crt # path: localhost:5000/ca.crt @@ -54,43 +54,21 @@ spec: volumeMounts: - name: garden-docker-data mountPath: /var/lib/docker - - name: garden-build-sync-data + - name: garden-build-sync mountPath: /garden-build # Need to mount the registry cert so that the daemon trusts it # - name: garden-registry-tls # mountPath: /etc/docker/certs.d resources: - {{- toYaml .Values.dockerDaemon.resources | nindent 12 }} - - name: {{ .Chart.Name }}-sync - image: "eugenmayer/rsync:latest" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: docker - containerPort: 873 - protocol: TCP - livenessProbe: - tcpSocket: - port: 873 - readinessProbe: - tcpSocket: - port: 873 - volumeMounts: - - mountPath: /data - name: garden-build-sync-data - env: - # The service is not exposed at all outside the cluster, so this should be all good. - - name: ALLOW - value: "0.0.0.0/0" - resources: - {{- toYaml .Values.sync.resources | nindent 12 }} - - name: {{ .Chart.Name }}-proxy + {{- toYaml .Values.resources | nindent 12 }} + - name: proxy image: "basi/socat:v0.1.0" imagePullPolicy: {{ .Values.image.pullPolicy }} command: - /bin/sh - -c - | - socat -d -d TCP-LISTEN:5000,fork TCP:{{ .Values.dockerDaemon.registry.hostname }}:5000 + socat TCP-LISTEN:5000,fork TCP:{{ .Values.registry.hostname }}:5000 ports: - name: proxy containerPort: 5000 diff --git a/garden-service/static/kubernetes/system/docker-daemon/templates/service.yaml b/garden-service/static/kubernetes/system/docker-daemon/templates/service.yaml deleted file mode 100644 index d4127c51a1..0000000000 --- a/garden-service/static/kubernetes/system/docker-daemon/templates/service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "docker-daemon.fullname" . }} - labels: - app.kubernetes.io/name: {{ include "docker-daemon.name" . }} - helm.sh/chart: {{ include "docker-daemon.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: 2375 - protocol: TCP - name: docker - selector: - app.kubernetes.io/name: {{ include "docker-daemon.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/garden-service/static/kubernetes/system/docker-daemon/templates/volume.yaml b/garden-service/static/kubernetes/system/docker-daemon/templates/volume.yaml index 988cd74b78..bd353a7ea1 100644 --- a/garden-service/static/kubernetes/system/docker-daemon/templates/volume.yaml +++ b/garden-service/static/kubernetes/system/docker-daemon/templates/volume.yaml @@ -7,29 +7,11 @@ spec: - ReadWriteOnce resources: requests: - storage: {{ .Values.dockerDaemon.storage.request }} -{{- if .Values.dockerDaemon.storage.storageClass }} -{{- if (eq "-" .Values.dockerDaemon.storage.storageClass) }} + storage: {{ .Values.storage.request }} +{{- if .Values.storage.storageClass }} +{{- if (eq "-" .Values.storage.storageClass) }} storageClassName: "" {{- else }} - storageClassName: "{{ .Values.dockerDaemon.storage.storageClass }}" -{{- end }} -{{- end }} ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: garden-build-sync-data -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.sync.storage.request }} -{{- if .Values.sync.storage.storageClass }} -{{- if (eq "-" .Values.sync.storage.storageClass) }} - storageClassName: "" -{{- else }} - storageClassName: "{{ .Values.sync.storage.storageClass }}" + storageClassName: "{{ .Values.storage.storageClass }}" {{- end }} {{- end }} \ No newline at end of file diff --git a/garden-service/static/kubernetes/system/docker-daemon/values.yaml b/garden-service/static/kubernetes/system/docker-daemon/values.yaml index eedffc1af4..3baf6da05d 100644 --- a/garden-service/static/kubernetes/system/docker-daemon/values.yaml +++ b/garden-service/static/kubernetes/system/docker-daemon/values.yaml @@ -14,29 +14,18 @@ service: type: ClusterIP port: 2375 -dockerDaemon: - resources: - limits: - cpu: "2" - memory: 4Gi - requests: - cpu: 200m - memory: 256Mi - storage: - request: 20Gi - registry: - hostname: garden-docker-registry - -sync: - resources: - limits: - cpu: 200m - memory: 256Mi - requests: - cpu: 100m - memory: 128Mi - storage: - request: 2Gi +mode: cluster-docker +resources: + limits: + cpu: "2" + memory: 4Gi + requests: + cpu: 200m + memory: 256Mi +storage: + request: 20Gi +registry: + hostname: garden-docker-registry nodeSelector: {} diff --git a/garden-service/static/kubernetes/system/nfs-provisioner/garden.yml b/garden-service/static/kubernetes/system/nfs-provisioner/garden.yml new file mode 100644 index 0000000000..e65f5d8dc9 --- /dev/null +++ b/garden-service/static/kubernetes/system/nfs-provisioner/garden.yml @@ -0,0 +1,15 @@ +kind: Module +name: nfs-provisioner +description: Provisioner for NFS volumes used by Garden system services +type: helm +chart: stable/nfs-server-provisioner +version: "0.3.0" +releaseName: garden-nfs-provisioner +values: + nameOverride: garden-nfs-provisioner + fullnameOverride: garden-nfs-provisioner + persistence: + enable: true + size: 50Gi + storageClass: + name: garden-system-nfs diff --git a/garden-service/static/kubernetes/system/registry-proxy/Chart.yaml b/garden-service/static/kubernetes/system/registry-proxy/Chart.yaml index f29ca00d22..74df62f2fa 100644 --- a/garden-service/static/kubernetes/system/registry-proxy/Chart.yaml +++ b/garden-service/static/kubernetes/system/registry-proxy/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 appVersion: "1.0" description: A Helm chart for Kubernetes -name: docker-daemon +name: registry-proxy version: 0.1.0 diff --git a/garden-service/static/kubernetes/system/registry-proxy/garden.yml 
b/garden-service/static/kubernetes/system/registry-proxy/garden.yml index fc907f61c2..1a380ddb43 100644 --- a/garden-service/static/kubernetes/system/registry-proxy/garden.yml +++ b/garden-service/static/kubernetes/system/registry-proxy/garden.yml @@ -4,7 +4,6 @@ name: registry-proxy description: DaemonSet that proxies connections to the docker registry service on each node releaseName: garden-registry-proxy values: - dockerDaemon: - registry: - hostname: ${var.registry-hostname || "foo"} - # tlsSecretName: ${variables.registry-tls-secret-name} + registry: + hostname: ${var.registry-hostname || "foo"} + # tlsSecretName: ${variables.registry-tls-secret-name} diff --git a/garden-service/static/kubernetes/system/registry-proxy/templates/_helpers.tpl b/garden-service/static/kubernetes/system/registry-proxy/templates/_helpers.tpl index ea5b5b94df..240bdcb005 100644 --- a/garden-service/static/kubernetes/system/registry-proxy/templates/_helpers.tpl +++ b/garden-service/static/kubernetes/system/registry-proxy/templates/_helpers.tpl @@ -2,7 +2,7 @@ {{/* Expand the name of the chart. */}} -{{- define "docker-daemon.name" -}} +{{- define "registry-proxy.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} @@ -11,7 +11,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} -{{- define "docker-daemon.fullname" -}} +{{- define "registry-proxy.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} @@ -27,6 +27,6 @@ If release name contains chart name it will be used as a full name. {{/* Create chart name and version as used by the chart label. */}} -{{- define "docker-daemon.chart" -}} +{{- define "registry-proxy.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} diff --git a/garden-service/static/kubernetes/system/registry-proxy/templates/daemonset.yaml b/garden-service/static/kubernetes/system/registry-proxy/templates/daemonset.yaml index 119c870bab..9588d6eb35 100644 --- a/garden-service/static/kubernetes/system/registry-proxy/templates/daemonset.yaml +++ b/garden-service/static/kubernetes/system/registry-proxy/templates/daemonset.yaml @@ -1,16 +1,16 @@ apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{ include "docker-daemon.fullname" . }} + name: {{ include "registry-proxy.fullname" . }} labels: - app.kubernetes.io/name: {{ include "docker-daemon.name" . }} - helm.sh/chart: {{ include "docker-daemon.chart" . }} + app.kubernetes.io/name: {{ include "registry-proxy.name" . }} + helm.sh/chart: {{ include "registry-proxy.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} spec: selector: matchLabels: - app.kubernetes.io/name: {{ include "docker-daemon.name" . }} + app.kubernetes.io/name: {{ include "registry-proxy.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} updateStrategy: rollingUpdate: @@ -18,19 +18,9 @@ spec: template: metadata: labels: - app.kubernetes.io/name: {{ include "docker-daemon.name" . }} + app.kubernetes.io/name: {{ include "registry-proxy.name" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} spec: - volumes: - # - name: docker-certs - # hostPath: - # path: /etc/docker/certs.d - # - name: garden-registry-tls - # secret: - # secretName: {{ .Values.dockerDaemon.registry.tlsSecretName }} - # items: - # - key: tls.crt - # path: ca.crt containers: - name: {{ .Chart.Name }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" @@ -42,28 +32,20 @@ spec: # Copy the registry certs to the host # cp -r /certs/localhost /etc/docker/certs.d/localhost:5000 # Proxy node connections on 127.0.0.1:5000 to the docker registry - socat -d -d -d TCP-LISTEN:5000,fork,range=10.0.0.0/8 TCP:{{ .Values.dockerDaemon.registry.hostname }}:5000 + socat -d TCP-LISTEN:5000,fork,range=10.0.0.0/8 TCP:{{ .Values.registry.hostname }}:5000 ports: - name: docker containerPort: 5000 hostPort: 5000 protocol: TCP - # securityContext: - # privileged: true # livenessProbe: # tcpSocket: # port: 5000 # readinessProbe: # tcpSocket: # port: 5000 - # volumeMounts: - # Need to add the registry cert to the node so that the daemon trusts it - # - name: docker-certs - # mountPath: "/etc/docker/certs.d" - # - name: garden-registry-tls - # mountPath: "/certs/localhost" resources: - {{- toYaml .Values.dockerDaemon.resources | nindent 12 }} + {{- toYaml .Values.resources | nindent 12 }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/garden-service/static/kubernetes/system/registry-proxy/values.yaml b/garden-service/static/kubernetes/system/registry-proxy/values.yaml index 75ff85eb78..8cc38bc6e0 100644 --- a/garden-service/static/kubernetes/system/registry-proxy/values.yaml +++ b/garden-service/static/kubernetes/system/registry-proxy/values.yaml @@ -1,4 +1,4 @@ -# Default values for docker-daemon. +# Default values for garden-registry-proxy. # This is a YAML-formatted file. # Declare variables to be passed into your templates. 
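# For illustration (assumed final shape, matching the flattened keys the templates above now read):
#
#   registry:
#     hostname: garden-docker-registry
#     tlsSecretName: garden-docker-registry-tls
#
# The DaemonSet template references {{ .Values.registry.hostname }}, so the values file and the
# garden.yml overrides must use this flattened layout instead of the old dockerDaemon.* nesting.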
@@ -10,10 +10,9 @@ image: nameOverride: garden-registry-proxy fullnameOverride: garden-registry-proxy -dockerDaemon: - registry: - hostname: garden-docker-registry - tlsSecretName: garden-docker-registry-tls +registry: + hostname: garden-docker-registry + tlsSecretName: garden-docker-registry-tls nodeSelector: {} diff --git a/garden-service/test/helpers.ts b/garden-service/test/helpers.ts index 5f7d350673..6249527111 100644 --- a/garden-service/test/helpers.ts +++ b/garden-service/test/helpers.ts @@ -14,7 +14,7 @@ import { remove, readdirSync, existsSync } from "fs-extra" import { containerModuleSpecSchema, containerTestSchema, containerTaskSchema } from "../src/plugins/container/config" import { testExecModule, buildExecModule, execBuildSpecSchema } from "../src/plugins/exec" import { TaskResults } from "../src/task-graph" -import { validate, PrimitiveMap, joiArray } from "../src/config/common" +import { validate, joiArray } from "../src/config/common" import { GardenPlugin, PluginActions, @@ -22,18 +22,14 @@ import { ModuleActions, Plugins, } from "../src/types/plugin/plugin" -import { Garden, GardenOpts } from "../src/garden" +import { Garden, GardenParams } from "../src/garden" import { ModuleConfig } from "../src/config/module" import { mapValues, fromPairs } from "lodash" import { ModuleVersion } from "../src/vcs/vcs" import { GARDEN_SERVICE_ROOT } from "../src/constants" import { EventBus, Events } from "../src/events" import { ValueOf } from "../src/util/util" -import { Ignorer } from "../src/util/fs" -import { SourceConfig } from "../src/config/project" -import { BuildDir } from "../src/build-dir" import { LogEntry } from "../src/logger/log-entry" -import { ProviderConfig } from "../src/config/provider" import timekeeper = require("timekeeper") import { GLOBAL_OPTIONS } from "../src/cli/cli" import { RunModuleParams } from "../src/types/plugin/module/runModule" @@ -291,23 +287,8 @@ class TestEventBus extends EventBus { export class TestGarden extends Garden { events: TestEventBus - constructor( - public readonly projectRoot: string, - public readonly projectName: string, - public readonly environmentName: string, - public readonly variables: PrimitiveMap, - public readonly projectSources: SourceConfig[] = [], - public readonly buildDir: BuildDir, - public readonly gardenDirPath: string, - public readonly ignorer: Ignorer, - public readonly opts: GardenOpts, - plugins: Plugins, - providerConfigs: ProviderConfig[], - ) { - super( - projectRoot, projectName, environmentName, variables, projectSources, - buildDir, gardenDirPath, ignorer, opts, plugins, providerConfigs, - ) + constructor(params: GardenParams) { + super(params) this.events = new TestEventBus(this.log) } } diff --git a/garden-service/test/unit/src/plugins/container.ts b/garden-service/test/unit/src/plugins/container.ts index 068fc4bd0f..292032bbc8 100644 --- a/garden-service/test/unit/src/plugins/container.ts +++ b/garden-service/test/unit/src/plugins/container.ts @@ -283,6 +283,15 @@ describe("plugins.container", () => { }) }) + it("should correctly parse an id with a host with a port, and namespace", () => { + expect(helpers.parseImageId("localhost:5000/namespace/image:tag")).to.eql({ + host: "localhost:5000", + namespace: "namespace", + repository: "image", + tag: "tag", + }) + }) + it("should correctly parse an id with a host and multi-level namespace", () => { expect(helpers.parseImageId("my-host.com/a/b/c/d/image:tag")).to.eql({ host: "my-host.com", diff --git 
a/garden-service/test/unit/src/plugins/kubernetes/container/ingress.ts b/garden-service/test/unit/src/plugins/kubernetes/container/ingress.ts index 5f1c1b606b..162ee0cf76 100644 --- a/garden-service/test/unit/src/plugins/kubernetes/container/ingress.ts +++ b/garden-service/test/unit/src/plugins/kubernetes/container/ingress.ts @@ -33,7 +33,7 @@ const ports = [{ const basicConfig: KubernetesConfig = { name: "kubernetes", - buildMode: "local", + buildMode: "local-docker", context: "my-cluster", defaultHostname: "my.domain.com", deploymentRegistry: { diff --git a/garden-service/test/unit/src/util/fs.ts b/garden-service/test/unit/src/util/fs.ts index 9777f715b9..19d089e56c 100644 --- a/garden-service/test/unit/src/util/fs.ts +++ b/garden-service/test/unit/src/util/fs.ts @@ -7,7 +7,9 @@ import { getChildDirNames, isConfigFilename, getConfigFilePath, + getWorkingCopyId, } from "../../../../src/util/fs" +import { withDir } from "tmp-promise" const projectYamlFileExtensions = getDataDir("test-project-yaml-file-extensions") const projectDuplicateYamlFileExtensions = getDataDir("test-project-duplicate-yaml-file-extensions") @@ -100,4 +102,22 @@ describe("util", () => { } }) }) + + describe("getWorkingCopyId", () => { + it("should generate and return a new ID for an empty directory", async () => { + return withDir(async (dir) => { + const id = await getWorkingCopyId(dir.path) + expect(id).to.be.string + }, { unsafeCleanup: true }) + }) + + it("should return the same ID after generating for the first time", async () => { + return withDir(async (dir) => { + const idA = await getWorkingCopyId(dir.path) + const idB = await getWorkingCopyId(dir.path) + + expect(idA).to.equal(idB) + }, { unsafeCleanup: true }) + }) + }) }) diff --git a/garden-service/test/unit/src/util/util.ts b/garden-service/test/unit/src/util/util.ts index 0712831a39..f6c3f555a7 100644 --- a/garden-service/test/unit/src/util/util.ts +++ b/garden-service/test/unit/src/util/util.ts @@ -4,8 +4,10 @@ import { getEnvVarName, deepOmitUndefined, deepFilter, + splitLast, } from "../../../../src/util/util" import { expectError } from "../../../helpers" +import { splitFirst } from "../../../../src/util/util" describe("util", () => { describe("getEnvVarName", () => { @@ -118,4 +120,24 @@ describe("util", () => { expect(deepOmitUndefined(obj)).to.eql({ a: 1, b: 2, c: [{ d: 3 }] }) }) }) + + describe("splitFirst", () => { + it("should split string on first occurrence of given delimiter", () => { + expect(splitFirst("foo:bar:boo", ":")).to.eql(["foo", "bar:boo"]) + }) + + it("should return the whole string as first element when no delimiter is found in string", () => { + expect(splitFirst("foo", ":")).to.eql(["foo", ""]) + }) + }) + + describe("splitLast", () => { + it("should split string on last occurrence of given delimiter", () => { + expect(splitLast("foo:bar:boo", ":")).to.eql(["foo:bar", "boo"]) + }) + + it("should return the whole string as last element when no delimiter is found in string", () => { + expect(splitLast("foo", ":")).to.eql(["", "foo"]) + }) + }) })
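// Illustrative sketch (not part of the patch): with the new lock in
// garden-service/src/plugins/kubernetes/util.ts, parallel requests for the same target reuse a
// single kubectl port-forward process. The deployment name and port below are assumed values,
// taken from the build-sync rsync service described earlier in this diff.
//
//   const params = { ctx, log, namespace: "my-namespace", targetDeployment: "garden-build-sync", port: 873 }
//   const [a, b] = await Promise.all([getPortForward(params), getPortForward(params)])
//   // a.localPort === b.localPort -- the second caller waits on the lock and reuses the registered forward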