forked from catenax/tractusx
-
Notifications
You must be signed in to change notification settings - Fork 2
208 lines (184 loc) · 8.28 KB
/
portal.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
#
# Copyright (c) 2021 T-Systems International GmbH (Catena-X Consortium)
#
# See the AUTHORS file(s) distributed with this work for additional
# information regarding authorship.
#
# See the LICENSE file(s) distributed with this work for
# additional information regarding license terms.
#
#################################################################################################################
# Github Workflow for continuous portal deployment, could depend on infrastructure workflows
#################################################################################################################
# The following secrets are expected to be set
# AZURE_CREDENTIALS - JSON object containing all info to login to azure via a Service Principal (SP)
# AZURE_SUBSCRIPTION_ID - part of AZURE_CREDENTIALS referring to the ID of the deployment target
# AZURE_AD_CLIENT_ID - part of AZURE_CREDENTIALS referring to the ID of the SP
# AZURE_AD_CLIENT_SECRET - part of AZURE_CREDENTIALS holding the login secret for the SP
# AZURE_AD_TENANT_ID - part of AZURE_CREDENTIALS referring to the hosting AD of the SP
# CATENAX_ADMIN_USER - name of the admin user to be used to secure deployed resources
# CATENAX_ADMIN_PASSWORD - secret of the admin user to be used to secure deployed resources
# CATENAX_USER - name of the operation user to be used to secure deployed resources
# CATENAX_PASSWORD - secret of the operation user to be used to secure deployed resources
# CATENAX_ADMIN_GROUP_ID - referring to the ID of the admin group that will be assigned to deployed resources
#################################################################################################################
# The following preconditions need to be made
# Upload ${workspace}ssh.pub to the TFSTATE storage account
##################################################################################################################
# If you intend to use this workflow in new branches/workspaces, see the comments marked
# ADAPT HERE TO ADD NEW WORKSPACES
# The name of the workflow (as well as subordinate workflows and jobs) starts with a "run-level"
# such that we can depend on them in order to implement workflow dependencies
# Workflow name carries the "run-level" prefix (8) so other workflows
# can depend on it to implement workflow ordering.
name: 8 Frontend Portal
####################################################
# Triggers on pushes to the portal-related
# branches and folders
####################################################
# ADAPT HERE TO ADD NEW WORKSPACES
on:
  push:
    # ADAPT HERE TO ADD NEW WORKSPACES
    branches:
      # Integration environment
      - main
      # ART1 Semantics Layer and Digital Twin Dev Space
      - feature/CATX-A1-SLDT
      # Onboarding Dev Space
      - feature/CAX-portal
    # ADAPT HERE TO ADD NEW WORKSPACES
    paths:
      # This is where the portal sources reside
      - 'portal/code/cx-portal/**'
      # any deployment manifest
      - 'infrastructure/manifests/portal.yaml'
      # this workflow file itself
      - '.github/workflows/portal.yml'
###############################################################
# Consists of two jobs: the first determines the environment
# and the second one (doing the real work) only triggers
# if such an environment can be detected (otherwise the
# workflow finishes green without doing anything)
###############################################################
# ADAPT HERE TO ADD NEW WORKSPACES
jobs:
  ########################################
  # First job to determine the environment
  ########################################
  # ADAPT HERE TO ADD NEW WORKSPACES
  environment:
    # name of the job starts with a "run-level" subordinate to the workflow such that we can
    # depend on them in order to implement workflow dependencies
    name: 80 Frontend Portal
    runs-on: ubuntu-latest
    outputs:
      # workspace/environment name derived from repository + branch;
      # stays empty for unsupported combinations, which makes the
      # downstream job skip itself via its `if:` condition
      workspace: ${{ steps.setvars.outputs.workspace }}
    steps:
      - name: Set variables
        id: setvars
        # Map repository + branch onto a deployment workspace.
        # Writes the result to $GITHUB_OUTPUT — the former
        # "::set-output" workflow command is deprecated and has been
        # disabled by GitHub Actions.
        run: |
          if [[ "${{ github.repository }}" == eclipse/tractusx ]]; then
            if [[ "${{ github.ref }}" == refs/heads/main ]]; then
              echo "Determined PRODUCTION"
              echo "workspace=prod" >> "$GITHUB_OUTPUT"
            else
              echo "Unsupported Environment on ECLIPSE. Leaving Workspace empty."
            fi
          elif [[ "${{ github.repository }}" == catenax/tractusx ]]; then
            if [[ "${{ github.ref }}" == refs/heads/main ]]; then
              echo "Determined INTEGRATION"
              echo "workspace=int" >> "$GITHUB_OUTPUT"
            elif [[ "${{ github.ref }}" == refs/heads/feature/CATX-A1-SLDT ]]; then
              echo "Determined SEMANTICS"
              echo "workspace=dev042" >> "$GITHUB_OUTPUT"
            else
              echo "Unsupported Branch on CATENAX. Leaving Workspace empty."
            fi
          elif [[ "${{ github.repository }}" == tractusx-team-portal-onboarding/tractusx ]]; then
            if [[ "${{ github.ref }}" == refs/heads/feature/CAX-portal ]]; then
              echo "Determined dev003"
              echo "workspace=dev003" >> "$GITHUB_OUTPUT"
            else
              echo "Unsupported Branch on ONBOARDING fork. Leaving Workspace empty."
            fi
          else
            echo "Unsupported Environment/Repository. Leaving Workspace empty."
          fi
  ##########################################
  # Second job does the real container work
  ##########################################
  # FROM HERE ON: NO ADAPTION NEEDED ANYMORE
  containerize:
    # name of the job starts with a "run-level" subordinate to the workflow such that we can
    # depend on them in order to implement workflow dependencies
    name: 89 Frontend Portal NG Build & Deploy
    runs-on: ubuntu-latest
    # rely on the first job
    needs: environment
    # rely on successful detection of the workspace, ignore if empty
    if: ${{needs.environment.outputs.workspace}}
    # Set important environment vars
    env:
      # workspace/environment name determined by the first job
      WORKSPACE: ${{needs.environment.outputs.workspace}}
      CX_ENV: ${{needs.environment.outputs.workspace}}
      # public hostname of the workspace's AKS-hosted portal
      CATENA_PORTAL_URL: catenax${{needs.environment.outputs.workspace}}aksportal.germanywestcentral.cloudapp.azure.com
      # use the commit hash in production?
      VERSION: latest
    steps:
      # Wait for infrastructure deployments to terminate
      # NOTE(review): the action reference below appears garbled (likely by an
      # email-obfuscation filter during export); presumably it should read
      # lewagon/wait-on-check-action@<version> — confirm against repo history.
      - name: Wait for Infrastructure
        uses: lewagon/[email protected]
        with:
          ref: ${{ github.ref }}
          # wait for all checks whose names start with run-level "0"
          check-regexp: 0.*
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          # poll every 30 seconds
          wait-interval: 30
          # skipped/cancelled infrastructure runs do not block this job
          allowed-conclusions: success,skipped,cancelled
      - name: Checkout
        uses: actions/checkout@v2
      # Install JS dependencies for the shared library and the portal
      - name: Install Dependencies
        working-directory: ./portal/code
        run: yarn
      - name: Linter Checks
        working-directory: ./portal/code
        run: yarn lint
      - name: Build Library and Portal
        working-directory: ./portal/code
        run: yarn build
      - name: Unit Tests
        working-directory: ./portal/code/cx-portal
        run: yarn test
      # # activate this step on new repo
      # - name: Publish Shared Components to npm
      #   working-directory: ./cx-portal-shared-components
      #   run: echo yarn publish
      #   env:
      #     NODE_AUTH_TOKEN: ${{secrets.NPM_ACCESS_TOKEN}}
      # Authenticate against the private Azure Container Registry
      - name: Login to Docker Registry
        uses: azure/docker-login@v1
        with:
          login-server: catenaxacr.azurecr.io
          username: catenaxacr
          password: ${{secrets.AZURE_REGISTRY_PASSWORD}}
      - name: Build Docker Image
        working-directory: ./portal/code
        run: yarn build:docker
      - name: Publish Docker Image
        working-directory: ./portal/code
        run: yarn publish:docker
      - name: Login to Azure
        uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}
      # Obtain kubectl credentials for the workspace's AKS cluster
      - name: Kubernetes Login
        uses: azure/aks-set-context@v1
        with:
          creds: '${{ secrets.AZURE_CREDENTIALS }}'
          resource-group: 'catenax-${{needs.environment.outputs.workspace}}-rg'
          cluster-name: 'catenax-${{needs.environment.outputs.workspace}}-aks-services'
        id: login
      # Substitute the env vars above into the manifest and apply it
      - name: Deploy the Portal
        working-directory: ./infrastructure/manifests
        run: cat portal.yaml | envsubst | kubectl apply -n portal -f -
      # Restart the deployment — presumably so pods re-pull the freshly
      # pushed image, since the "latest" tag itself does not change
      - name: Rollout the Deployment
        working-directory: ./infrastructure/manifests
        run: kubectl rollout restart deployment portal -n portal