-
Notifications
You must be signed in to change notification settings - Fork 8.3k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[build] Support for generating docker image (#28380)
- Loading branch information
1 parent
13a213c
commit 3a1d4ad
Showing
11 changed files
with
520 additions
and
3 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,20 @@ | ||
/*
 * Licensed to Elasticsearch B.V. under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch B.V. licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

// Barrel file: re-export everything from ./run so callers can import the
// docker generator from this folder directly.
export * from './run';
175 changes: 175 additions & 0 deletions
175
src/dev/build/tasks/os_packages/docker_generator/resources/bin/kibana-docker
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,175 @@ | ||
#!/bin/bash

# Run Kibana, using environment variables to set longopts defining Kibana's
# configuration.
#
# eg. Setting the environment variable:
#
#       ELASTICSEARCH_STARTUPTIMEOUT=60
#
# will cause Kibana to be invoked with:
#
#       --elasticsearch.startupTimeout=60

# Every Kibana setting that may be configured through the environment.
# Keep this list sorted and free of duplicates: each entry that has a
# matching env var set produces one --<setting>=<value> longopt, so a
# duplicate entry would pass the same option twice.
kibana_vars=(
    console.enabled
    console.proxyConfig
    console.proxyFilter
    elasticsearch.customHeaders
    elasticsearch.hosts
    elasticsearch.logQueries
    elasticsearch.password
    elasticsearch.pingTimeout
    elasticsearch.preserveHost
    elasticsearch.requestHeadersWhitelist
    elasticsearch.requestTimeout
    elasticsearch.shardTimeout
    elasticsearch.sniffInterval
    elasticsearch.sniffOnConnectionFault
    elasticsearch.sniffOnStart
    elasticsearch.ssl.certificate
    elasticsearch.ssl.certificateAuthorities
    elasticsearch.ssl.key
    elasticsearch.ssl.keyPassphrase
    elasticsearch.ssl.verificationMode
    elasticsearch.startupTimeout
    elasticsearch.username
    i18n.locale
    kibana.defaultAppId
    kibana.index
    logging.dest
    logging.quiet
    logging.silent
    logging.useUTC
    logging.verbose
    map.includeElasticMapsService
    ops.interval
    path.data
    pid.file
    regionmap
    regionmap.includeElasticMapsService
    server.basePath
    server.customResponseHeaders
    server.defaultRoute
    server.host
    server.maxPayloadBytes
    server.name
    server.port
    server.rewriteBasePath
    server.ssl.cert
    server.ssl.certificate
    server.ssl.certificateAuthorities
    server.ssl.cipherSuites
    server.ssl.clientAuthentication
    server.ssl.enabled
    server.ssl.key
    server.ssl.keyPassphrase
    server.ssl.redirectHttpFromPort
    server.ssl.supportedProtocols
    server.xsrf.whitelist
    status.allowAnonymous
    status.v6ApiFormat
    tilemap.options.attribution
    tilemap.options.maxZoom
    tilemap.options.minZoom
    tilemap.options.subdomains
    tilemap.url
    timelion.enabled
    vega.enableExternalUrls
    xpack.apm.enabled
    xpack.apm.ui.enabled
    xpack.canvas.enabled
    xpack.graph.enabled
    xpack.grokdebugger.enabled
    xpack.infra.enabled
    xpack.infra.query.partitionFactor
    xpack.infra.query.partitionSize
    xpack.infra.sources.default.fields.container
    xpack.infra.sources.default.fields.host
    xpack.infra.sources.default.fields.message
    xpack.infra.sources.default.fields.pod
    xpack.infra.sources.default.fields.tiebreaker
    xpack.infra.sources.default.fields.timestamp
    xpack.infra.sources.default.logAlias
    xpack.infra.sources.default.metricAlias
    xpack.ml.enabled
    xpack.monitoring.elasticsearch.hosts
    xpack.monitoring.elasticsearch.password
    xpack.monitoring.elasticsearch.pingTimeout
    xpack.monitoring.elasticsearch.ssl.certificateAuthorities
    xpack.monitoring.elasticsearch.ssl.verificationMode
    xpack.monitoring.elasticsearch.username
    xpack.monitoring.enabled
    xpack.monitoring.kibana.collection.enabled
    xpack.monitoring.kibana.collection.interval
    xpack.monitoring.max_bucket_size
    xpack.monitoring.min_interval_seconds
    xpack.monitoring.node_resolver
    xpack.monitoring.report_stats
    xpack.monitoring.ui.container.elasticsearch.enabled
    xpack.monitoring.ui.container.logstash.enabled
    xpack.monitoring.ui.enabled
    xpack.reporting.capture.browser.chromium.disableSandbox
    xpack.reporting.capture.browser.chromium.proxy.bypass
    xpack.reporting.capture.browser.chromium.proxy.enabled
    xpack.reporting.capture.browser.chromium.proxy.server
    xpack.reporting.capture.browser.type
    xpack.reporting.capture.concurrency
    xpack.reporting.capture.loadDelay
    xpack.reporting.capture.settleTime
    xpack.reporting.capture.timeout
    xpack.reporting.csv.maxSizeBytes
    xpack.reporting.enabled
    xpack.reporting.encryptionKey
    xpack.reporting.index
    xpack.reporting.kibanaApp
    xpack.reporting.kibanaServer.hostname
    xpack.reporting.kibanaServer.port
    xpack.reporting.kibanaServer.protocol
    xpack.reporting.queue.indexInterval
    xpack.reporting.queue.pollInterval
    xpack.reporting.queue.timeout
    xpack.reporting.roles.allow
    xpack.searchprofiler.enabled
    xpack.security.authProviders
    xpack.security.cookieName
    xpack.security.enabled
    xpack.security.encryptionKey
    xpack.security.secureCookies
    xpack.security.sessionTimeout
    xpack.xpack_main.telemetry.enabled
)

longopts=''
for kibana_var in "${kibana_vars[@]}"; do
    # 'elasticsearch.hosts' -> 'ELASTICSEARCH_HOSTS'
    env_var=$(echo ${kibana_var^^} | tr . _)

    # Indirectly lookup env var values via the name of the var.
    # REF: http://tldp.org/LDP/abs/html/bashver2.html#EX78
    value=${!env_var}
    if [[ -n $value ]]; then
      longopt="--${kibana_var}=${value}"
      longopts+=" ${longopt}"
    fi
done

# Files created at run-time should be group-writable, for Openshift's sake.
umask 0002

# The virtual file /proc/self/cgroup should list the current cgroup
# membership. For each hierarchy, you can follow the cgroup path from
# this file to the cgroup filesystem (usually /sys/fs/cgroup/) and
# introspect the statistics for the cgroup for the given
# hierarchy. Alas, Docker breaks this by mounting the container
# statistics at the root while leaving the cgroup paths as the actual
# paths. Therefore, Kibana provides a mechanism to override
# reading the cgroup path from /proc/self/cgroup and instead uses the
# cgroup path defined the configuration properties
# cpu.cgroup.path.override and cpuacct.cgroup.path.override.
# Therefore, we set this value here so that cgroup statistics are
# available for the container this process will run in.

# NOTE: ${longopts} is intentionally unquoted so each generated
# --setting=value expands to its own argument.
exec /usr/share/kibana/bin/kibana --cpu.cgroup.path.override=/ --cpuacct.cgroup.path.override=/ ${longopts} "$@"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,99 @@ | ||
/* | ||
* Licensed to Elasticsearch B.V. under one or more contributor | ||
* license agreements. See the NOTICE file distributed with | ||
* this work for additional information regarding copyright | ||
* ownership. Elasticsearch B.V. licenses this file to you under | ||
* the Apache License, Version 2.0 (the "License"); you may | ||
* not use this file except in compliance with the License. | ||
* You may obtain a copy of the License at | ||
* | ||
* http://www.apache.org/licenses/LICENSE-2.0 | ||
* | ||
* Unless required by applicable law or agreed to in writing, | ||
* software distributed under the License is distributed on an | ||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||
* KIND, either express or implied. See the License for the | ||
* specific language governing permissions and limitations | ||
* under the License. | ||
*/ | ||
|
||
import { access, link, unlink, chmod } from 'fs'; | ||
import { resolve } from 'path'; | ||
import { promisify } from 'util'; | ||
import { write, copyAll, mkdirp, exec } from '../../../lib'; | ||
import * as dockerTemplates from './templates'; | ||
|
||
// Promise-returning wrappers around the callback-style fs functions,
// so the generator below can use async/await throughout.
const accessAsync = promisify(access);
const linkAsync = promisify(link);
const unlinkAsync = promisify(unlink);
const chmodAsync = promisify(chmod);
|
||
/**
 * Generate a Kibana docker image from a previously built linux tarball.
 *
 * @param {Object} config build config; provides the build version and
 *   path resolution helpers (`resolveFromTarget`, `resolveFromRepo`).
 * @param {Object} log build logger, passed through to `exec`.
 * @param {Object} build current build; `build.isOss()` selects the OSS flavor.
 * @returns {Promise<void>}
 * @throws {Error} when the required kibana linux tarball is missing from
 *   the target directory, or when any docker build step fails.
 */
export async function runDockerGenerator(config, log, build) {
  const license = build.isOss() ? 'ASL 2.0' : 'Elastic License';
  const imageFlavor = build.isOss() ? '-oss' : '';
  const imageTag = 'docker.elastic.co/kibana/kibana';
  const versionTag = config.getBuildVersion();
  const artifactTarball = `kibana${ imageFlavor }-${ versionTag }-linux-x86_64.tar.gz`;
  const artifactsDir = config.resolveFromTarget('.');
  const dockerBuildDir = config.resolveFromRepo('build', 'kibana-docker', build.isOss() ? 'oss' : 'default');
  const dockerOutputDir = config.resolveFromTarget(`kibana${ imageFlavor }-${ versionTag }-docker.tar.gz`);

  // Verify we have the needed kibana linux target in order to build the
  // kibana docker image; fail with an actionable message when it is missing.
  // Any other access failure (e.g. permissions) is rethrown as-is instead of
  // being silently swallowed.
  try {
    await accessAsync(resolve(artifactsDir, artifactTarball));
  } catch (e) {
    if (e && e.code === 'ENOENT' && e.syscall === 'access') {
      throw new Error(
        `Kibana linux target (${ artifactTarball }) is needed in order to build ${''
        }the docker image. None was found at ${ artifactsDir }`
      );
    }
    throw e;
  }

  // Create the docker build target folder. Errors here must surface:
  // swallowing them would only produce a confusing failure at link time.
  await mkdirp(dockerBuildDir);

  // Delete the currently linked target inside the kibana docker build
  // folder, if we have one. A missing link (ENOENT) is expected on the
  // first run and ignored; anything else is a real error.
  try {
    await unlinkAsync(resolve(dockerBuildDir, artifactTarball));
  } catch (e) {
    if (!e || e.code !== 'ENOENT') {
      throw e;
    }
  }

  // Create the kibana linux target inside the kibana docker build folder.
  await linkAsync(
    resolve(artifactsDir, artifactTarball),
    resolve(dockerBuildDir, artifactTarball),
  );

  // Write all the needed docker config files into the kibana-docker folder,
  // rendering each template with this shared scope.
  const scope = {
    artifactTarball,
    imageFlavor,
    versionTag,
    license,
    artifactsDir,
    imageTag,
    dockerOutputDir
  };

  for (const dockerTemplate of Object.values(dockerTemplates)) {
    await write(resolve(dockerBuildDir, dockerTemplate.name), dockerTemplate.generator(scope));
  }

  // Copy all the needed resources into the kibana-docker folder in order
  // to build the docker image according to the generated Dockerfile
  // (see templates/kibana_yml.template.js).
  await copyAll(
    config.resolveFromRepo('src/dev/build/tasks/os_packages/docker_generator/resources'),
    dockerBuildDir,
  );

  // Build the docker image into the target folder by running the script
  // generated from templates/build_docker_sh.template.js.
  await chmodAsync(resolve(dockerBuildDir, 'build_docker.sh'), '755');
  await exec(log, `./build_docker.sh`, [], {
    cwd: dockerBuildDir,
    level: 'info',
  });
}
Oops, something went wrong.