
Commit

Add dougabuild - directory containing Jenksinsfiles and Ansible playbook to build an nginx server to view the docs
Dougal Seeley committed May 4, 2021
1 parent a0f4e43 commit a485d9c
Showing 16 changed files with 430 additions and 0 deletions.
7 changes: 7 additions & 0 deletions docs/dcs.asciidoc
@@ -0,0 +1,7 @@
= Dougal Common Schema (DCS) Reference
:doctype: book
:ecs: DCS
:toc: left

include::fields.asciidoc[]
include::field-values.asciidoc[]
12 changes: 12 additions & 0 deletions dougabuild/.gitignore
@@ -0,0 +1,12 @@
inventory_*
*.retry
.vscode*
.idea*
.DS_Store
venv
*.pyc
Pipfile.lock
roles/clusterverse
/gcp__*.json
id_rsa*
/roles/docserver/files
18 changes: 18 additions & 0 deletions dougabuild/.vaultpass-client.py
@@ -0,0 +1,18 @@
#!/usr/bin/env python3
import os
import sys
# import argparse

# parser = argparse.ArgumentParser()
# parser.add_argument("--vault-id", type=str)
# args = parser.parse_args()

# if args.vault_id is None or args.vault_id == "None" or args.vault_id == "default":

envvar_vault_pass = "VAULT_PASSWORD_BUILDENV"

if os.environ.get(envvar_vault_pass) is not None and os.environ.get(envvar_vault_pass) != "":
print(os.environ[envvar_vault_pass])
else:
print("ERROR: '" + envvar_vault_pass + "' is not set in environment")
sys.exit(1)
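
This client is wired up via vault_password_file in ansible.cfg (added later in this commit), so Ansible invokes it whenever a vaulted value needs decrypting. A minimal sketch of exercising it by hand, assuming the variable is exported by the caller (in CI it would come from a Jenkins credentials binding; the value below is only a placeholder):

export VAULT_PASSWORD_BUILDENV='********'   # placeholder; normally injected by the CI environment
./.vaultpass-client.py                      # prints the password and exits 0; exits 1 with an error if the variable is unset or empty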
116 changes: 116 additions & 0 deletions dougabuild/Jenksinsfile
@@ -0,0 +1,116 @@
#!groovy

//These are not needed when this runs as a pipeline SCM job (they are added automatically to the 'scm' variable), but if this file is pasted directly into a pipeline job they are used as a fallback
def PROJECT_URL = "https://github.com/dseeley/ecs"

//This allows us to create our own Docker image for this specific use-case. Once built, it is not rebuilt, so it only adds delay the first time it is used.
def create_custom_image(image_name, params = "") {
// Create a lock to prevent building the same image in parallel
lock("IMAGEBUILDLOCK__" + env.NODE_NAME) {
def jenkins_username = sh(script: 'whoami', returnStdout: true).trim()
def jenkins_uid = sh(script: "id -u ${jenkins_username}", returnStdout: true).trim()
def jenkins_gid = sh(script: "id -g ${jenkins_username}", returnStdout: true).trim()
def gopath = "${env.JENKINS_HOME}/go"

def dockerfile = """
FROM ubuntu:20.04
ARG DEBIAN_FRONTEND=noninteractive
ENV JENKINS_HOME=${env.JENKINS_HOME}
ENV HOME=${env.JENKINS_HOME}
ENV TZ=Europe/London
ENV GOPATH=${gopath}
ENV PATH=${gopath}/bin:$PATH
RUN groupadd -g ${jenkins_gid} ${jenkins_username} && useradd -m -u ${jenkins_uid} -g ${jenkins_gid} -s /bin/bash ${jenkins_username}
RUN apt-get update && apt-get install -y git iproute2 \
python3-boto python3-boto3 python3-botocore python3-dev python3-distutils python3-dnspython python3-google-auth python3-googleapi python3-libcloud python3-jinja2 python3-jmespath python3-netaddr python3-paramiko python3-pip python3-ruamel.yaml python3-setuptools python3-wheel python3-xmltodict \
&& pip3 install ansible ansible-base \
&& pip3 install -r \$(pip3 show ansible | grep ^Location | sed 's/Location: \\(.*\\)/\\1/')/ansible_collections/azure/azcollection/requirements-azure.txt
RUN apt-get install -y asciidoctor
RUN apt-get install -y golang-go build-essential python3-git python3-venv
""".stripIndent()

writeFile(file: "Dockerfile", text: dockerfile, encoding: "UTF-8")
custom_build = docker.build(image_name, params + "--network host .")

return (custom_build)
}
}

properties([
parameters([
credentials(name: 'GIT_CREDS', credentialType: 'com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl', defaultValue: 'GITHUB_SVC_USER', description: 'Jenkins username/password credentials for GitHub', required: false),
string(name: 'APP_NAME', description: "An optional custom app_name to override the default in the playbook"),
choice(name: 'CLOUD_REGION', choices: ['esxifree/dougalab', 'aws/eu-west-1', 'azure/westeurope', 'gcp/europe-west1'], description: "Choose a cloud/region"),
choice(name: 'BUILDENV', choices: ['tools', 'sandbox', 'dev', 'stage', 'prod'], description: "Choose an environment to deploy"),
string(name: 'CLUSTER_ID', defaultValue: '', description: "Select a cluster_id to deploy", trim: true),
])
])

node {
sh 'printenv | sort'
echo "Params: $params"

def docker_parent_net_str = ""
if (sh(script: 'grep -sq "docker\\|lxc" /proc/1/cgroup', returnStatus: true) == 0) {
println("Running in docker. Getting network to pass to docker-in-docker containers...")
def docker_parent_net_id = sh(script: 'docker inspect $(basename $(cat /proc/1/cpuset)) -f "{{ range .NetworkSettings.Networks }}{{println .NetworkID}}{{end}}" | head -n 1', returnStdout: true).trim()
docker_parent_net_str = "--network ${docker_parent_net_id}"
println("docker_parent_net_str: ${docker_parent_net_str}")
}

create_custom_image("scs", "").inside("${docker_parent_net_str}") {
stage('Check Environment') {
sh 'printenv | sort'
println("common_deploy_vars params:" + params)
}

println("currentBuild.getBuildCauses: " + currentBuild.getBuildCauses())
if (currentBuild.getBuildCauses('hudson.model.Cause$SCMTriggerCause').size() > 0 || currentBuild.getBuildCauses('hudson.model.Cause$UpstreamCause').size() > 0) {
println("Checking out default scm: " + scm.userRemoteConfigs + " -- " + scm.branches)
checkout(scm)
} else {
def GIT_BRANCH = "master"
if (env.CHANGE_BRANCH) {
GIT_BRANCH = 'refs/heads/' + env.CHANGE_BRANCH
} else if (env.BRANCH_NAME) {
GIT_BRANCH = 'refs/heads/' + env.BRANCH_NAME
} else if (env.TAG_NAME) {
GIT_BRANCH = 'refs/tags/' + env.TAG_NAME
}
println("No 'scm' params for clusterverse testsuite repo; using params.")
checkout([$class: 'GitSCM', branches: [[name: "${GIT_BRANCH}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'WipeWorkspace']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: params.GIT_CREDS ? params.GIT_CREDS : '', url: PROJECT_URL]]])
}

stage('Build base ECS (python only)') {
echo 'Build the yaml/json'
sh 'python3 scripts/generator.py'
dir('docs') {
echo 'Build the asciidoc'
sh 'asciidoctor dcs.asciidoc -o index.html'
}
}

stage('Build Experimental (golang needed)') {
echo 'Create virtual env'
sh 'make ve'
echo 'Build the yaml'
sh 'make'
}

if (params.CLOUD_REGION && params.BUILDENV && params.CLUSTER_ID) {
sh 'mkdir -p dougabuild/roles/docserver/files'
sh 'cp -R docs/*.html generated dougabuild/roles/docserver/files'

def (CLOUD_TYPE, REGION) = params.CLOUD_REGION.split('/')

String APP_NAME = ""
if (params.APP_NAME != "" && params.APP_NAME != null) {
APP_NAME = " -e app_name=" + params.APP_NAME
}

stage('Build docserver') {
sh "ansible-playbook -e cloud_type=${CLOUD_TYPE} -e region=${REGION} -e buildenv=${params.BUILDENV} -e clusterid=${params.CLUSTER_ID} cluster.yml $APP_NAME -vvv"
}
}
}
}
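
The two doc-build stages are plain shell steps, so the output can be reproduced locally with the same commands the pipeline runs. A rough sketch, assuming asciidoctor, Python 3 and Go are installed (the custom Docker image above provides them) and the repo root is the working directory:

python3 scripts/generator.py                         # 'Build base ECS (python only)': regenerate the yaml/json
(cd docs && asciidoctor dcs.asciidoc -o index.html)  # render the asciidoc reference
make ve && make                                      # 'Build Experimental (golang needed)': create the venv, then build the yaml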
67 changes: 67 additions & 0 deletions dougabuild/Jenksinsfile_merge_upstream
@@ -0,0 +1,67 @@
#!groovy
import groovy.json.JsonOutput

//These are not needed when this runs as a pipeline SCM job (they are added automatically to the 'scm' variable), but if this file is pasted directly into a pipeline job they are used as a fallback
def PROJECT_URL = "https://github.com/dseeley/ecs"
def UPSTREAM_URL = "https://github.com/elastic/ecs"


properties([
parameters([
credentials(name: 'GIT_CREDS', credentialType: 'com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl', defaultValue: 'GITHUB_SVC_USER', description: 'Jenkins username/password credentials for GitHub', required: false),
gitParameter(name: 'UPSTREAM_RELEASE', type: 'PT_BRANCH_TAG', branch: '', branchFilter: 'upstream/v\\*', tagFilter: 'v\\*', defaultValue: 'master', description: 'Choose the branch/tag from which to merge.', selectedValue: 'NONE', sortMode: 'NONE', useRepository: 'elastic/ecs'),
])
])

node {
sh 'printenv | sort'
echo "Params: $params"

stage('Create upstream merge PR') {
checkout([$class: 'GitSCM', branches: [[name: "*/master"]], extensions: [[$class: 'WipeWorkspace']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: params.GIT_CREDS ? params.GIT_CREDS : '', url: PROJECT_URL]]])
def currentHeadRev = sh(returnStdout: true, script: "git rev-parse HEAD").trim()

withCredentials([usernamePassword(credentialsId: params.GIT_CREDS, passwordVariable: 'GIT_TOKEN_PSW', usernameVariable: 'GIT_TOKEN_USR')]) {
def apiUrlUpstream = "https://api.github.com/repos/" + UPSTREAM_URL.replaceFirst("^(http[s]?://[^/]+/)", "") + "/releases"
def latestReleaseQuery = sh(returnStdout: true, script: "curl -s -H \"Authorization: Token ${GIT_TOKEN_PSW}\" -H \"Accept: application/json\" -H \"Content-type: application/json\" -X GET ${apiUrlUpstream}/latest").trim()
def latestElasticRelease = readJSON text: "${latestReleaseQuery}"
println(latestElasticRelease)

sh "git config user.name ${params.GIT_CREDS}"
sh "git config user.email ${params.GIT_CREDS}"
sh "git remote add upstream ${UPSTREAM_URL}.git"
sh "git fetch upstream"

def ECSReleaseBody = sh(returnStdout: true, script: "git log HEAD..${latestElasticRelease.tag_name} --pretty=format:\"<li> %H - %s</li>\"").trim()
// def ECSReleaseBody = sh(returnStdout: true, script: "git log ${currentHeadRev}..HEAD --pretty=format:\"<li> %H - %s</li>\"").trim()
println(ECSReleaseBody)
if (ECSReleaseBody != "") {
def apiUrlProject = "https://api.github.com/repos/" + PROJECT_URL.replaceFirst("^(http[s]?://[^/]+/)", "")
def releasePRBranch = "ecs_merge__${latestElasticRelease.tag_name}"

def checkPRQuery = sh(returnStdout: true, script: "curl -vvv -s --fail -H \"Authorization: Token ${GIT_TOKEN_PSW}\" -H \"Accept: application/json\" -H \"Content-type: application/json\" -X GET ${apiUrlProject}/pulls?head=${releasePRBranch}").trim()
def checkPR = readJSON text: "${checkPRQuery}"
println(checkPR)

if ("${checkPR.size()}" > 0) {
error "Pull Request May Already Exist!"
} else {
sh "git branch ${releasePRBranch}"
sh "git checkout ${releasePRBranch}"
// sh 'git commit -m "Automated upstream ECS merge of ' + latestElasticRelease.tag_name + '"'

def latestElasticReleaseBranch = sh(returnStdout: true, script: "git branch -r --contains tags/${latestElasticRelease.tag_name}").trim()
def repoUrlWithAuth = "https://${GIT_TOKEN_USR}:${GIT_TOKEN_PSW}@github.com/" + PROJECT_URL.replaceFirst("^(http[s]?://[^/]+/)", "") + ".git"
sh "git merge ${latestElasticReleaseBranch}"
sh "git push --repo=${repoUrlWithAuth} --set-upstream ${repoUrlWithAuth} ${releasePRBranch}"

def PRPayload = JsonOutput.toJson(["title": "Upstream ECS merge of ${latestElasticRelease.tag_name}", "body": "${ECSReleaseBody}", "head": "${releasePRBranch}", "base": "master"])
println(PRPayload)
sh "curl -vvv -s --fail -H \"Authorization: Token ${GIT_TOKEN_PSW}\" -H \"Accept: application/json\" -H \"Content-type: application/json\" -X POST -d '${PRPayload}' ${apiUrlProject}/pulls"
}
} else {
echo "There are no release changes"
}
}
}
}
16 changes: 16 additions & 0 deletions dougabuild/ansible.cfg
@@ -0,0 +1,16 @@
[defaults]
forks = 50
force_handlers = True
vault_password_file = .vaultpass-client.py
;vault_identity_list = [email protected], [email protected], [email protected], [email protected], [email protected]
host_key_checking = False
force_valid_group_names = ignore
roles_path = ./roles
interpreter_python = auto

[ssh_connection]
retries=5
ssh_args = -o 'UserKnownHostsFile=/dev/null' -o 'ControlMaster=auto' -o 'ControlPersist=60s'
#ssh_args = -o 'UserKnownHostsFile=/dev/null' -o 'ControlMaster=auto' -o 'ControlPersist=60s' -o ProxyCommand="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i <proxy_cert> -W %h:%p -q <user>@<host>" ## To use with a bastion
pipelining = True
control_path_dir=/tmp/.ansible/cp
37 changes: 37 additions & 0 deletions dougabuild/cluster.yml
@@ -0,0 +1,37 @@
---

- name: clusterverse | Deploy the cluster
hosts: localhost
connection: local
gather_facts: no
tasks:
- { name: "Get dependent roles via ansible-galaxy", local_action: "command ansible-galaxy install --ignore-errors -r requirements.yml", tags: ["always"] }

- { include_role: { name: "clusterverse/clean", apply: { tags: ["clusterverse_clean"]} }, tags: ["clusterverse_clean"], when: "clean is defined" }
- { include_role: { name: "clusterverse/create", apply: { tags: ["clusterverse_create"]} }, tags: ["clusterverse_create"] }
- { include_role: { name: "clusterverse/dynamic_inventory", apply: { tags: ["clusterverse_dynamic_inventory"]} }, tags: ["clusterverse_dynamic_inventory"] }
- { name: "clusterverse | Copy ansible_ssh_private_key_file", local_action: "copy content={{cluster_vars[buildenv].ssh_connection_cfg.host.ansible_ssh_private_key_file}} dest='id_rsa_ansible_ssh_private_key_file' mode='0600'", when: "cluster_vars[buildenv].ssh_connection_cfg.host.ansible_ssh_private_key_file is defined", no_log: yes, tags: ["always"] }
- { name: "clusterverse | Copy bastion sshkey", local_action: "copy content={{cluster_vars[buildenv].ssh_connection_cfg.bastion.ssh_priv_key}} dest='id_rsa_bastion' mode='0600'", when: "cluster_vars[buildenv].ssh_connection_cfg.bastion.ssh_priv_key is defined", no_log: yes, tags: ["always"] }

- name: clusterverse | Wait for SSH connections
hosts: all
gather_facts: no
tasks: [ {wait_for_connection: "", tags: ["always"] } ]

- name: clusterverse | Configure the cluster
hosts: all
tasks: [ { include_role: { name: "clusterverse/config", apply: { tags: ["clusterverse_config"]} }, tags: ["clusterverse_config"] } ]


###### Application roles
- name: Application roles
hosts: all
tasks:
- { include_role: { name: "docserver", apply: { tags: ["docserver"]} }, tags: ["docserver"] }
######


- name: clusterverse | Perform cluster readiness operations
hosts: localhost
connection: local
tasks: [ { include_role: { name: "clusterverse/readiness", apply: { tags: ["clusterverse_readiness"]} }, tags: ["clusterverse_readiness"] } ]
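
The Jenkinsfile drives this playbook with cloud_type/region/buildenv/clusterid extra-vars; a hand-run equivalent looks like the sketch below (example values are taken from the pipeline's parameter choices). Note that the clusterverse/clean role only runs when a 'clean' extra-var is defined; its accepted values come from the clusterverse role itself, so <scope> is a placeholder here:

ansible-playbook cluster.yml -e cloud_type=esxifree -e region=dougalab -e buildenv=sandbox -e clusterid=ecs
ansible-playbook cluster.yml -e cloud_type=esxifree -e region=dougalab -e buildenv=sandbox -e clusterid=ecs -e clean=<scope> --tags clusterverse_clean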
3 changes: 3 additions & 0 deletions dougabuild/cluster_defs/app_vars.yml
@@ -0,0 +1,3 @@
---

web_root: "/www"
30 changes: 30 additions & 0 deletions dougabuild/cluster_defs/cluster_vars.yml
@@ -0,0 +1,30 @@
---

redeploy_schemes_supported: ['_scheme_addallnew_rmdisk_rollback', '_scheme_addnewvm_rmdisk_rollback', '_scheme_rmvm_rmdisk_only', '_scheme_rmvm_keepdisk_rollback']

#redeploy_scheme: _scheme_addallnew_rmdisk_rollback
#redeploy_scheme: _scheme_addnewvm_rmdisk_rollback
#redeploy_scheme: _scheme_rmvm_rmdisk_only
#redeploy_scheme: _scheme_rmvm_keepdisk_rollback

skip_dynamic_inventory_sshwait: true

app_name: "docserver" # The name of the application cluster (e.g. 'couchbase', 'nginx'); becomes part of cluster_name. Provided is a default to ensure no accidental overwriting.
app_class: "logging" # The class of application (e.g. 'database', 'webserver'); becomes part of the fqdn

cluster_name: "{{app_name}}-{{buildenv}}" # Identifies the cluster within the cloud environment

cluster_vars:
type: "{{cloud_type}}"
region: "{{region}}"
dns_server: "" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added.
dns_nameserver_zone: &dns_nameserver_zone "" # The zone that dns_server will operate on. gcloud dns needs a trailing '.'. Leave blank if no external DNS (use IPs only)
dns_user_domain: "{%- if _dns_nameserver_zone -%}{{cloud_type}}-{{region}}.{{app_class}}.{{buildenv}}.{{_dns_nameserver_zone}}{%- endif -%}" # A user-defined _domain_ part of the FQDN (if more prefixes are required before the dns_nameserver_zone)
custom_tagslabels:
inv_environment_id: "{{buildenv}}"
inv_service_id: "{{app_class}}"
inv_cluster_id: "{{cluster_name}}"
inv_cluster_type: "{{app_name}}"
ssh_whitelist: &ssh_whitelist ['10.0.0.0/8', '82.69.177.168/29']
_ssh_whitelist: *ssh_whitelist
_dns_nameserver_zone: *dns_nameserver_zone
10 changes: 10 additions & 0 deletions dougabuild/cluster_defs/esxifree/cluster_vars__cloud.yml
@@ -0,0 +1,10 @@
---

_scheme_rmvm_keepdisk_rollback__copy_or_move: "move"

cluster_vars:
dns_cloud_internal_domain: "" # The cloud-internal zone as defined by the cloud provider (e.g. GCP, AWS)
dns_server: "" # Specify DNS server. nsupdate, route53 or clouddns. If empty string is specified, no DNS will be added.
inventory_ip: "private" # 'public' or 'private', (private in case we're operating in a private LAN). If public, 'assign_public_ip' must be 'yes'
hardware_version: "19"
delete_cloudinit: True # Whether to delete the cloud-init config and uninstall cloud-init after initialisation
25 changes: 25 additions & 0 deletions dougabuild/cluster_defs/esxifree/ecs/cluster_vars__clusterid.yml
@@ -0,0 +1,25 @@
---

prometheus_node_exporter_install: false
filebeat_install: false
metricbeat_install: false

beats_config:
filebeat:
# output_logstash_hosts: ["localhost:5044"] # The destination hosts for filebeat-gathered logs
# extra_logs_paths: # The array is optional, if you need to add more paths or files to scrape for logs
# - /var/log/myapp/*.log
metricbeat:
# output_logstash_hosts: ["localhost:5044"] # The destination hosts for metricbeat-gathered metrics
# diskio: # Diskio retrieves metrics for all disk partitions by default. When diskio.include_devices is defined, only the listed partitions are monitored
# include_devices: ["sda", "sdb", "nvme0n1", "nvme1n1", "nvme2n1"]


cluster_vars:
dns_nameserver_zone: &dns_nameserver_zone "chezdj.com" # The zone that dns_server will operate on. gcloud dns needs a trailing '.'. Leave blank if no external DNS (use IPs only)
dns_user_domain: "{%- if _dns_nameserver_zone -%}{{cloud_type}}-{{region}}.{{app_class}}.{{buildenv}}.{{_dns_nameserver_zone}}{%- endif -%}" # A user-defined _domain_ part of the FQDN (if more prefixes are required before the dns_nameserver_zone)
custom_tagslabels:
inv_resident_id: "myresident"
inv_proposition_id: "myproposition"
inv_cost_centre: "0000000000"
_dns_nameserver_zone: *dns_nameserver_zone
@@ -0,0 +1,13 @@
---

_ubuntu2004image: "gold-ubuntu2004l-20210415101808"
_centos7image: "gold-ubuntu2004l-20210415101808"

cluster_vars:
image: "{{_ubuntu2004image}}"
esxi_ip: "192.168.1.3"
username: "svc"
password: !vault |
$ANSIBLE_VAULT;1.1;AES256
7669080460651349243347331538721104778691266429457726036813912140404310
datastore: "4tb-evo860-ssd"