The following workflow shows how to use a private git repository without compromising secrets.
cd security/git/go/
endly app
Where:
init:
appPath: $Pwd()/myapp
pipeline:
setTarget:
action: exec:setTarget
URL: ssh://127.0.0.1
credentials: dev
setSdk:
action: sdk:set
sdk: go:1.12
build:
action: exec:run
checkError: true
terminators:
- Password
- Username
secrets:
gitSecrets: git-myaccount
commands:
- cd $appPath
- export GIT_TERMINAL_PROMPT=1
- ls *
- $cmd[2].stdout:/myapp/? rm myapp
- export GO111MODULE=on
- go build -o myapp
- '$cmd[5].stdout:/Username/? $gitSecrets.username'
- '$cmd[6].stdout:/Password/? $gitSecrets.password'
- '$cmd[7].stdout:/Username/? $gitSecrets.username'
- '$cmd[8].stdout:/Password/? $gitSecrets.password'
stop:
action: process:stop
input: myapp
start:
action: process:start
directory: $appPath/
env:
PORT: 8081
watch: true
immuneToHangups: true
command: ./myapp
- git-myaccount is the credentials file for the private git repository, created with endly -c=git-myaccount
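At runtime endly loads the secret from ~/.secret/git-myaccount.json and exposes it as a map; a minimal sketch of the decrypted shape, with placeholder values, is:

{
  "Username": "myaccount",
  "Password": "********"
}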
The following workflow shows how to run a database backup without compromising database credentials.
cd security/database
endly backup.yaml -t=take
endly backup.yaml -t=restore
Where:
init:
suffix: 20190716
backupFile: mydb_${suffix}.sql
dbname: mydb
dbbucket: sdatbackup
dbIP:
mysql: 127.0.0.1
pipeline:
take:
dump:
action: exec:run
systemPaths:
- /usr/local/mysql/bin
secrets:
mydb: mysql-mydb-root
commands:
- echo 'starting $dbname backup'
- mysqldump -uroot -p${mydb.password} -h${dbIP.mysql} $dbname > /tmp/$backupFile
- history -c
upload:
action: storage:copy
source:
URL: /tmp/$backupFile
dest:
credentials: gcp-myservice
URL: gs://${dbbucket}/data/
restore:
download:
action: storage:copy
source:
credentials: gcp-myservice
URL: gs://${dbbucket}/data/$backupFile
dest:
URL: /tmp/$backupFile
load:
action: exec:run
systemPaths:
- /usr/local/mysql/bin
secrets:
mydb: mysql-mydb-root
commands:
- echo 'starting $dbname restore'
- mysql -uroot -p${mydb.password} -h${dbIP.mysql} $dbname < /tmp/$backupFile
- history -c
Where:
- mysql-mydb-root is the MySQL credentials file (~/.secret/mysql-mydb-root.json) created by
endly -c=mysql-mydb-root
- gcp-myservice is the Google Cloud service account credentials file (~/.secret/gcp-myservice.json) created for your service account
- history -c clears the shell history for security reasons
Troubleshooting secrets: to show the expanded password, set ENDLY_SECRET_REVEAL=true
export ENDLY_SECRET_REVEAL=true
endly backup.yaml -t=take
Reference: Endly Secrets
The following workflow shows how to build an application Docker container image.
cd deplyoment/docker/go
endly app.yaml
Where:
init:
buildPath: $Pwd()
pipeline:
build:
action: docker:build
path: ${buildPath}
nocache: false
tag:
image: myapp
version: '1.0'
stop:
action: docker:stop
images:
- myapp
start:
action: docker:run
name: myapp
image: myapp:1.0
env:
PORT: 8081
- Dockerfile
# transient image
FROM golang:1.12.7-alpine3.10 as build
WORKDIR /go/src/app
COPY myapp .
ENV GO111MODULE on
RUN go build -v -o /app

# final image
FROM alpine:3.10
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
COPY --from=build /app /app
CMD ["/app"]
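Once the workflow completes, you can verify the image and the running container directly with the Docker CLI, for example:

docker images myapp
docker ps --filter name=myapp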
Reference: Endly Docker Service
The following workflow shows how to automate a React app build and deployment.
cd deplyoment/developer/node
endly app.yaml -m
Where:
init:
sourceCode: $Pwd()/my-app
appPath: /tmp/my-app
pipeline:
setTarget:
action: exec:setTarget
URL: ssh://cloud.machine
credentials: dev
setSdk:
action: sdk:set
sdk: node:12
copy:
action: storage:copy
dest: $execTarget
compress: true
logging: false
assets:
'$sourceCode': '/tmp/my-app'
build:
action: exec:run
checkError: true
commands:
- env
- cd $appPath
- npm install
- npm test
stop:
action: process:stop
input: react-scripts/scripts/start.js
start:
action: process:start
directory: $appPath/
watch: true
immuneToHangups: true
command: npm start
- the '-m' option enables interactive mode (endly keeps running until ctrl-c)
- cloud.machine is your localhost or a cloud VM
- dev is the SSH credentials file used to connect to the cloud machine, created by
endly -c=dev
The following workflow shows how to automate a Java webapp build and deployment.
cd deplyoment/developer/tomcat
endly app.yaml
Where:
init:
appPath: $Pwd()/app
tomcatLocation: /tmp/webapp
tomcatTarget: $tomcatLocation/tomcat/webapps
pipeline:
setTarget:
action: exec:setTarget
URL: ssh://127.0.0.1
credentials: dev
setSdk:
action: sdk:set
sdk: jdk:1.8
setMaven:
action: deployment:deploy
appName: maven
version: 3.5
baseLocation: /usr/local
deployTomcat:
action: deployment:deploy
appName: tomcat
version: 7.0
baseLocation: $tomcatLocation
build:
action: exec:run
checkError: true
commands:
- cd $appPath
- mvn clean package
deploy:
action: storage:copy
source:
URL: $appPath/target/my-app-1.0.war
dest:
URL: $tomcatTarget/app.war
stop:
action: exec:run
commands:
- ps -ef | grep catalina | grep -v grep
- $cmd[0].stdout:/catalina/ ? $tomcatLocation/tomcat/bin/catalina.sh stop
start:
action: exec:run
checkError: true
commands:
- $tomcatLocation/tomcat/bin/catalina.sh start
- "echo 'App URL: http://127.0.0.1:8080/app/hello'"
The following workflow shows how to automate a Go app build and deployment.
cd deplyoment/developer/go
endly app.yaml
Where:
init:
appPath: $Pwd()/myapp
pipeline:
setTarget:
action: exec:setTarget
URL: ssh://127.0.0.1
credentials: dev
setSdk:
action: sdk:set
sdk: go:1.12
build:
action: exec:run
checkError: true
commands:
- cd $appPath
- ls *
- $cmd[1].stdout:/myapp/? rm myapp
- export GO111MODULE=on
- go build -o myapp
stop:
action: process:stop
input: myapp
start:
action: process:start
directory: $appPath/
env:
PORT: 8081
watch: true
immuneToHangups: true
command: ./myapp
The following workflow shows how to automate a Go app build and deployment in hybrid mode.
cd deplyoment/hybrid/go
endly app.yaml
Where:
init:
buildPath: $Pwd()
appPath: $Pwd()/myapp
pipeline:
setSdk:
action: sdk:set
sdk: go:1.12
build:
action: exec:run
checkError: true
commands:
- cd $appPath
- ls *
- $cmd[1].stdout:/app/? rm app
- export GO111MODULE=on
- export GOOS=linux
- export CGO_ENABLED=0
- go build -o app
buildImage:
action: docker:build
path: ${buildPath}
nocache: false
tag:
image: myapp
version: '1.0'
start:
action: exec:run
systemPaths:
- /usr/local/bin
commands:
- docker-compose down
- docker-compose up -d
- Dockerfile
FROM alpine:3.10
RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
COPY myapp/app /app
CMD ["/app"]
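The start task also assumes a docker-compose.yaml next to the Dockerfile, which is not shown in this example; a minimal sketch consistent with the image built above (port and env values are assumptions) might be:

version: '3'
services:
  myapp:
    image: myapp:1.0
    environment:
      PORT: 8081
    ports:
      - "8081:8081"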
The following workflow shows how to automate Cloud Functions deployment.
cd deplyoment/serverless/cloud_functions/go
endly app.yaml
Where:
init:
appPath: $Pwd()/hello
gcpCredentials: gcp-myservice
pipeline:
setTarget:
action: exec:setTarget
URL: ssh://127.0.0.1
credentials: dev
setSdk:
action: sdk:set
sdk: go:1.12
vendor:
action: exec:run
commands:
- unset GOPATH
- export GO111MODULE=on
- cd ${appPath}
- go mod vendor
deploy:
action: gcp/cloudfunctions:deploy
credentials: $gcpCredentials
'@name': HelloWorld
entryPoint: HelloWorldFn
runtime: go111
public: true
source:
URL: ${appPath}
The following workflow shows how to automate AWS Lambda deployment.
cd deplyoment/serverless/lambda/go
endly app.yaml
Where:
init:
functionRole: lambda-hello
appPath: $Pwd()/hello
appArchive: ${appPath}/app.zip
awsCredentials: aws-myuser
pipeline:
setTarget:
action: exec:setTarget
URL: ssh://127.0.0.1
credentials: dev
setSdk:
action: sdk:set
sdk: go:1.12
deploy:
build:
action: exec:run
checkError: true
commands:
- cd ${appPath}
- unset GOPATH
- export GOOS=linux
- export GOARCH=amd64
- go build -o app
- zip -j app.zip app
publish:
action: aws/lambda:deploy
credentials: $awsCredentials
functionname: HelloWorld
runtime: go1.x
handler: app
code:
zipfile: $LoadBinary(${appArchive})
rolename: $functionRole
attach:
- policyarn: arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
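Once published, a quick smoke test with the AWS CLI (assuming a configured profile and region) could be:

aws lambda invoke --function-name HelloWorld /tmp/response.json
cat /tmp/response.json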
The following workflow shows how to automate database/datastore state setup with Docker.
cd deplyoment/state/docker
endly db
Where:
init:
mydbCredentials: mysql-mydb-root
mydbSecrets: ${secrets.$mydbCredentials}
pipeline:
services:
mysql:
action: docker:run
image: mysql:5.7
name: mydb1
ports:
3306: 3306
env:
MYSQL_ROOT_PASSWORD: ${mydbSecrets.Password}
aerospike:
action: docker:run
image: 'aerospike/aerospike-server:3.16.0.6'
name: mydb2
ports:
3000: 3000
3001: 3001
3002: 3002
3003: 3003
8081: 8081
cmd:
- asd
- --config-file
- /opt/aerospike/etc/aerospike.conf
entrypoint:
- /entrypoint.sh
The following workflow shows how to automate MySQL state setup.
cd deplyoment/state/database/mysql
endly setup
Where:
init:
mydbCredentials: mysql-mydb-root
mydbSecrets: ${secrets.$mydbCredentials}
dbIP:
mysql: 127.0.0.1
pipeline:
services:
mysql:
action: docker:run
image: mysql:5.7
name: dbsync
ports:
3306: 3306
env:
MYSQL_ROOT_PASSWORD: ${mydbSecrets.Password}
create:
mydb:
action: dsunit:init
datastore: mydb
recreate: true
config:
driverName: mysql
descriptor: '[username]:[password]@tcp(${dbIP.mysql}:3306)/[dbname]?parseTime=true'
credentials: $mydbCredentials
admin:
datastore: mysql
ping: true
config:
driverName: mysql
descriptor: '[username]:[password]@tcp(${dbIP.mysql}:3306)/[dbname]?parseTime=true'
credentials: $mydbCredentials
scripts:
- URL: mydb/schema.sql
load:
action: dsunit:prepare
datastore: mydb
URL: mydb/data
- mysql-mydb-root is the MySQL credentials file created by
endly -c=mysql-mydb-root
- 'mydb/data' is the source folder whose *.json data files are matched with database tables (see the sketch below)
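A hypothetical mydb/data/users.json (table name and columns assumed) would seed the users table:

[
  {"id": 1, "username": "user1"},
  {"id": 2, "username": "user2"}
]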
The following workflow shows how to automate PostgreSQL state setup.
cd deplyoment/state/database/postgresql
endly setup
Where:
init:
mydbCredentials: pq-mydb-root
mydbSecrets: ${secrets.$mydbCredentials}
dbIP:
pg: 127.0.0.1
pipeline:
services:
postgresql:
action: docker:run
image: postgres:9.6-alpine
name: mydb
ports:
5432: 5432
env:
POSTGRES_USER: ${mydbSecrets.Username}
POSTGRES_PASSWORD: ${mydbSecrets.Password}
create:
action: dsunit:init
datastore: mydb
config:
driverName: postgres
descriptor: host=${dbIP.pg} port=5432 user=[username] password=[password] dbname=[dbname] sslmode=disable
credentials: $mydbCredentials
admin:
datastore: postgres
ping: true
config:
driverName: postgres
descriptor: host=${dbIP.pg} port=5432 user=[username] password=[password] dbname=postgres sslmode=disable
credentials: $mydbCredentials
recreate: true
scripts:
- URL: mydb/schema.sql
load:
action: dsunit:prepare
datastore: mydb
URL: mydb/data
- pq-mydb-root is the PostgreSQL credentials file created by
endly -c=pq-mydb-root
The following workflow shows how to automate BigQuery state setup.
cd deplyoment/state/database/bigquery/setup
endly setup
Where:
init:
bqCredentials: gcp-e2e
pipeline:
create:
action: dsunit:init
datastore: mydb
config:
driverName: bigquery
credentials: $bqCredentials
parameters:
datasetId: mydb
scripts:
- URL: mydb/schema.sql
load:
action: dsunit:prepare
datastore: mydb
URL: mydb/data
The following workflow shows how to restore BigQuery data with the copy API call.
cd deplyoment/state/database/bigquery/api
endly copy
init:
i: 0
gcpCredentials: gcp-e2e
gcpSecrets: ${secrets.$gcpCredentials}
src:
projectID: $gcpSecrets.ProjectID
datasetID: db1
dest:
projectID: $gcpSecrets.ProjectID
datasetID: db1e2e
pipeline:
registerSource:
action: dsunit:register
datastore: ${src.datasetID}
config:
driverName: bigquery
credentials: $gcpCredentials
parameters:
datasetId: $src.datasetID
readTables:
action: dsunit:query
datastore: ${src.datasetID}
SQL: SELECT table_id AS table FROM `${src.projectID}.${src.datasetID}.__TABLES__`
post:
dataset: $Records
copyTables:
loop:
action: print
message: $i/$Len($dataset) -> $dataset[$i].table
copyTable:
action: gcp/bigquery:copy
logging: false
credentials: $gcpCredentials
sourceTable:
projectID: ${src.projectID}
datasetID: ${src.datasetID}
tableID: $dataset[$i].table
destinationTable:
projectID: ${dest.projectID}
datasetID: ${dest.datasetID}
tableID: $dataset[$i].table
inc:
action: nop
init:
_ : $i++
goto:
when: $i < $Len($dataset)
action: goto
task: copyTables
The following workflow shows how to automate MongoDB state setup.
cd deplyoment/state/datastore/mongo
endly setup
pipeline:
services:
mongo:
action: docker:run
image: mongo:latest
name: mymongo
ports:
27017: 27017
register:
action: dsunit:register
datastore: mydb
ping: true
config:
driverName: mgc
parameters:
dbname: mydb
host: 127.0.0.1
keyColumn: id
load:
action: dsunit:prepare
datastore: mydb
URL: mydb/data
The following workflow shows how to automate Aerospike state setup.
cd state/datastore/aerospike
endly setup
pipeline:
services:
aerospike:
action: docker:run
image: aerospike/aerospike-server:latest
name: aero
ports:
3000: 3000
3001: 3001
3002: 3002
3003: 3003
setup:
create:
action: dsunit:init
datastore: aerodb
ping: true
config:
driverName: aerospike
parameters:
dbname: aerodb
excludedColumns: uid
namespace: test
host: 127.0.0.1
port: 3000
users.keyColumn: uid
recreate: true
load:
action: dsunit:prepare
datastore: aerodb
URL: aerodb/data
Setup Data: @users.json
[
{},
{
"uid": "${uuid.next}",
"events": {
"$AsInt(952319704)": {
"ttl": 1565478965
},
"$AsInt(947840387)": {
"ttl": 1565479008
}
}
},
{
"uid": "${uuid.next}",
"events": {
"$AsInt(857513776)": {
"ttl": 1565479080
},
"$AsInt(283419022)": {
"ttl": 1565479092
}
}
}
]
aql> SELECT * FROM test.users;
+----------------------------------------+---------------------------------------------------------------------+
| PK | events |
+----------------------------------------+---------------------------------------------------------------------+
| "3b6b7f47-453d-4a07-aff0-879bc85d264c" | MAP('{947840387:{"ttl":1565479008}, 952319704:{"ttl":1565478965}}') |
| "67bf0d31-b9a7-417c-86dd-62c03d2bd60c" | MAP('{283419022:{"ttl":1565479092}, 857513776:{"ttl":1565479080}}') |
+----------------------------------------+---------------------------------------------------------------------+
2 rows in set (0.142 secs)
OK
The following workflow shows how to automate AWS DynamoDB state setup.
cd state/datastore/dynamodb
endly setup authWith=myAwsSecret.json
init:
'!mydbCredentials': $params.authWith
pipeline:
setup:
action: dsunit:init
datastore: mydb
config:
driverName: dyndb
credentials: $mydbCredentials
tables:
- table: events
pkColumns:
- Date
- EventType
schemaURL: mydb/schema.json
load:
action: dsunit:prepare
datastore: mydb
URL: mydb/data
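The mydb/schema.json referenced above is not shown here; a hypothetical version following the DynamoDB CreateTable request shape for the events table might look like:

{
  "AttributeDefinitions": [
    {"AttributeName": "Date", "AttributeType": "S"},
    {"AttributeName": "EventType", "AttributeType": "S"}
  ],
  "KeySchema": [
    {"AttributeName": "Date", "KeyType": "HASH"},
    {"AttributeName": "EventType", "KeyType": "RANGE"}
  ],
  "ProvisionedThroughput": {"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}
}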
The following workflow shows how to automate GCP Firestore state setup.
cd state/datastore/firestore
endly setup authWith=myGCPSecrets.json
init:
'!gcpSecrets': $params.authWith
gcpSecretsMap: ${secrets.$gcpSecrets}
projectID: ${gcpSecretsMap.ProjectID}
pipeline:
init:
action: dsunit:init
datastore: mydb
config:
driverName: fsc
credentials: $gcpSecrets
parameters:
projectID: $projectID
prepare:
action: dsunit:prepare
datastore: mydb
URL: mydb/data
The following shows how to dynamically assemble a configuration file:
cd state/config
endly dynamic.yaml
cat /tmp/myapp/config.json
Where:
init:
settings: $Cat('settings.json')
settingsMap: $AsMap('$settings')
config:
key1: val1
key2: val2
featureX: ${settingsMap.featureX}
pipeline:
info:
action: print
message: $AsString('$config')
dynamic:
init:
cfg: $AsJSON('$config')
action: storage:upload
sourceKey: cfg
dest:
URL: /tmp/myapp/config.json
and the referenced settings.json contains:
{
"featureX": true
}
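Running cat /tmp/myapp/config.json then shows the assembled configuration (key order may differ):

{"featureX": true, "key1": "val1", "key2": "val2"}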
To dynamically update config.properties inside a war file, you can use the following:
init:
changeMe: this is my secret
pipeline:
updateArchive:
action: storage:copy
expand: true
source:
URL: app/config.properties
dest:
URL: file:/tmp/app.war/zip://localhost/WEB-INF/classes/
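Here app/config.properties is the source template; with expand: true every ${...} placeholder is substituted from workflow state during the copy. A hypothetical template might contain:

app.secret=${changeMe}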
The following shows how to generate a test data file:
cd state/data
endly generate.yaml
head -n 10 /tmp/myasset.csv
Where:
pipeline:
createCSV:
action: storage:generate
lines: 20
lineTemplate: '$i,name $i,address $i'
dest:
URL: /tmp/myasset.csv
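The generated file contains one templated line per index; head -n 3 /tmp/myasset.csv would show something like (assuming the index starts at 1):

1,name 1,address 1
2,name 2,address 2
3,name 3,address 3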
Alternatively, you can generate a JSON file with @json.yaml:
pipeline:
generate:
action: storage:generate
indexVariable: id
lines: 100
index: 55
lineTemplate: '{"id": ${id}, "name": "dummy ${id}", "type_id": ${id % 3} } '
dest:
URL: dummy.json
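With index: 55, the generated lines start at id 55, for example:

{"id": 55, "name": "dummy 55", "type_id": 1 }
{"id": 56, "name": "dummy 56", "type_id": 2 }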
Reference: Messaging Service
The following workflow shows how to create an AWS queue (SQS) and publish test messages to it:
init:
awsCredentials: aws-e2e
pipeline:
create:
action: msg:setupResource
credentials: $awsCredentials
resources:
- URL: mye2eQueue1
type: queue
vendor: aws
setup:
action: msg:push
credentials: $awsCredentials
sleepTimeMs: 5000
dest:
URL: mye2eQueue1
type: queue
vendor: aws
messages:
- data: 'Test: this is my 1st message'
- data: 'Test: this is my 2nd message'
The following workflow shows how to create a GCP Pub/Sub topic and publish messages from a file:
init:
gcpCredentials: gcp-e2e
pipeline:
create:
action: msg:setupResource
resources:
- URL: myTopic
type: topic
vendor: gcp
credentials: $gcpCredentials
setup:
action: msg:push
dest:
URL: /projects/${msg.projectID}/topics/myTopic
credentials: $gcpCredentials
source:
URL: data.json
Where @data.json is:
[
{
"data": "this is my 1st message",
"attributes": {
"attr1": "abc"
}
},
{
"data": "this is my 2nd message",
"attributes": {
"attr1": "xyz"
}
}
]
The following workflow shows how to create a Kafka topic and publish messages to it:
pipeline:
create:
sleepTimeMs: 10000
action: msg:setupResource
comments: create topic and wait for a leadership election
resources:
- URL: myTopic
type: topic
replicationFactor: 1
partitions: 1
brokers:
- localhost:9092
setup:
action: msg:push
dest:
URL: tcp://localhost:9092/myTopic
vendor: kafka
messages:
- data: "this is my 1st message"
attributes:
key: abc
- data: "this is my 2nd message"
attributes:
key: xyz