# vpc.yml
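# Provisions the AWS network and IAM scaffolding for a Kubernetes cluster:
# VPC, public and private (Kubernetes) subnets, internet and NAT gateways,
# route tables, DHCP options, instance-profile roles, an EC2 keypair, and
# internal ELBs for etcd and the apiserver. The modules used (ec2_vpc_net,
# iam, ec2_elb_lb, ...) are the legacy Ansible 2.x AWS modules; newer
# releases replace several of them (e.g. iam_role, elb_classic_lb).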
---
- name: Provision VPC
  hosts: localhost
  gather_facts: false
  vars:
    output:
      vpc_public_subnets: []
      vpc_kubernetes_subnets: []
  tasks:
    - name: Include variables
      include_vars: all.yml
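    # all.yml must define the variables referenced below. A minimal sketch,
    # with illustrative values (the real file lives alongside this playbook):
    #
    # vpc_region: us-east-1
    # vpc_cidr: 10.0.0.0/16
    # ec2_keypair: kubernetes
    # kubernetes:
    #   name: kubernetes
    # vpc_public_subnets:
    #   - az: us-east-1a
    #     cidr: 10.0.0.0/24
    # vpc_kubernetes_subnets:
    #   - az: us-east-1a
    #     cidr: 10.0.10.0/24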
    - name: VPC is present
      ec2_vpc_net:
        region: "{{ vpc_region }}"
        cidr_block: "{{ vpc_cidr }}"
        name: "{{ kubernetes.name }}"
        tags:
          KubernetesCluster: "{{ kubernetes.name }}"
      register: vpc_out
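    # The output dict declared in vars is grown incrementally with
    # set_fact + combine; the same pattern repeats below for the subnets
    # and the two ELB endpoints, and the result is printed at the end.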
    - name: VPC id is set in output
      set_fact:
        output: "{{ output | combine({'vpc_id': vpc_out.vpc.id}) }}"
    - name: Internet gateway is present
      ec2_vpc_igw:
        region: "{{ vpc_region }}"
        vpc_id: "{{ vpc_out.vpc.id }}"
    - name: Public subnets are present
      ec2_vpc_subnet:
        region: "{{ vpc_region }}"
        vpc_id: "{{ vpc_out.vpc.id }}"
        az: "{{ item.az }}"
        cidr: "{{ item.cidr }}"
        tags:
          Name: "Public"
      register: public_subnets_out
      with_items: "{{ vpc_public_subnets }}"
    - name: Public subnets are set in output
      set_fact:
        output: "{{ output | combine({'vpc_public_subnets': output.vpc_public_subnets + [item.subnet]}) }}"
      with_items: "{{ public_subnets_out.results }}"
    - name: Public route table is present
      ec2_vpc_route_table:
        region: "{{ vpc_region }}"
        vpc_id: "{{ vpc_out.vpc.id }}"
        subnets: "{{ vpc_public_subnets | map(attribute='cidr') | list }}"
        routes:
          - dest: 0.0.0.0/0
            gateway_id: igw
        tags:
          Name: Public
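    # The NAT gateway lands in the first public subnet and allocates an
    # elastic IP on creation; if_exist_do_not_create keeps reruns from
    # provisioning a second (billed) gateway in that subnet.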
    - name: NAT gateway is present
      ec2_vpc_nat_gateway:
        region: "{{ vpc_region }}"
        subnet_id: "{{ public_subnets_out.results[0].subnet.id }}"
        if_exist_do_not_create: true
        wait: yes
      register: nat_gateway_out
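    # Private subnets for the cluster nodes. The KubernetesCluster tag is the
    # convention the AWS cloud provider uses to discover cluster resources.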
    - name: Kubernetes subnets are present
      ec2_vpc_subnet:
        region: "{{ vpc_region }}"
        vpc_id: "{{ vpc_out.vpc.id }}"
        az: "{{ item.az }}"
        cidr: "{{ item.cidr }}"
        tags:
          KubernetesCluster: "{{ kubernetes.name }}"
      register: kubernetes_subnets_out
      with_items: "{{ vpc_kubernetes_subnets }}"
    - name: Kubernetes subnets are set in output
      set_fact:
        output: "{{ output | combine({'vpc_kubernetes_subnets': output.vpc_kubernetes_subnets + [item.subnet]}) }}"
      with_items: "{{ kubernetes_subnets_out.results }}"
    - name: Kubernetes route table is present
      ec2_vpc_route_table:
        region: "{{ vpc_region }}"
        vpc_id: "{{ vpc_out.vpc.id }}"
        subnets: "{{ vpc_kubernetes_subnets | map(attribute='cidr') | list }}"
        routes:
          - dest: 0.0.0.0/0
            nat_gateway_id: "{{ nat_gateway_out.nat_gateway_id }}"
        tags:
          KubernetesCluster: "{{ kubernetes.name }}"
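    # Recreates the region-scoped private DNS defaults: AmazonProvidedDNS
    # plus the <region>.compute.internal search domain.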
    - name: DHCP options are present
      ec2_vpc_dhcp_options:
        region: "{{ vpc_region }}"
        vpc_id: "{{ vpc_out.vpc.id }}"
        dns_servers:
          - AmazonProvidedDNS
        domain_name: "{{ vpc_region }}.compute.internal"
        tags:
          KubernetesCluster: "{{ kubernetes.name }}"
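    # The legacy iam module creates each role together with an instance
    # profile of the same name; the trust policy lets EC2 instances assume it.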
    - name: Instance profiles are present
      iam:
        iam_type: role
        name: "{{ item }}"
        state: present
        trust_policy:
          Version: "2012-10-17"
          Statement:
            - Action: sts:AssumeRole
              Effect: Allow
              Principal:
                Service: ec2.amazonaws.com
      with_items:
        - KubeEtcd
        - KubeMaster
        - KubeWorker
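    # Inline role policies rendered from the JSON templates under
    # templates/iam_policies/; convert_data=False keeps the lookup result a
    # raw JSON string instead of letting Ansible parse it into a dict.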
    - name: KubeEtcd IAM Policy is present
      iam_policy:
        policy_name: KubeEtcd
        policy_json: "{{ lookup('template', 'templates/iam_policies/kube_etcd.json', convert_data=False) }}"
        state: present
        iam_type: role
        iam_name: KubeEtcd
    - name: KubeMaster IAM Policy is present
      iam_policy:
        policy_name: KubeMaster
        policy_json: "{{ lookup('template', 'templates/iam_policies/kube_master.json', convert_data=False) }}"
        state: present
        iam_type: role
        iam_name: KubeMaster
    - name: KubeWorker IAM Policy is present
      iam_policy:
        policy_name: KubeWorker
        policy_json: "{{ lookup('template', 'templates/iam_policies/kube_worker.json', convert_data=False) }}"
        state: present
        iam_type: role
        iam_name: KubeWorker
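    # ec2_key only returns private key material on the run that creates the
    # key, hence the changed guards on the two tasks that follow.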
    - name: EC2 keypair is present
      ec2_key:
        name: "{{ ec2_keypair }}"
        region: "{{ vpc_region }}"
      register: ec2_keypair_out
    - name: Save EC2 private key
      copy:
        content: "{{ ec2_keypair_out.key.private_key }}"
        dest: files/{{ ec2_keypair }}-private.pem
        mode: "0600"
      when: ec2_keypair_out.changed
    - name: Notify of important file
      debug:
        msg: "Please save the generated private key files/{{ ec2_keypair }}-private.pem in a safe place"
      when: ec2_keypair_out.changed
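    # Internal classic ELB fronting etcd client traffic (2379) across the
    # Kubernetes subnets; its DNS name becomes etcd_endpoint in the output.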
    - name: Etcd ELB is present
      ec2_elb_lb:
        name: etcd
        region: "{{ vpc_region }}"
        subnets: "{{ kubernetes_subnets_out.results | map(attribute='subnet') | map(attribute='id') | list }}"
        cross_az_load_balancing: yes
        scheme: internal
        state: present
        connection_draining_timeout: 60
        listeners:
          - protocol: tcp
            load_balancer_port: 2379
            instance_port: 2379
        health_check:
          ping_protocol: tcp
          ping_port: 2379
          response_timeout: 5
          interval: 10
          unhealthy_threshold: 2
          healthy_threshold: 3
      register: etcd_elb_out
    - name: Etcd endpoint is set in output
      set_fact:
        output: "{{ output | combine({'etcd_endpoint': etcd_elb_out.elb.dns_name}) }}"
    - name: Kubernetes ELB is present
      ec2_elb_lb:
        name: kubernetes
        region: "{{ vpc_region }}"
        subnets: "{{ kubernetes_subnets_out.results | map(attribute='subnet') | map(attribute='id') | list }}"
        cross_az_load_balancing: yes
        scheme: internal
        state: present
        connection_draining_timeout: 60
        listeners:
          - protocol: tcp
            load_balancer_port: 6443
            instance_port: 6443
        health_check:
          ping_protocol: tcp
          ping_port: 6443
          response_timeout: 5
          interval: 10
          unhealthy_threshold: 2
          healthy_threshold: 3
      register: kubernetes_elb_out
    - name: Kubernetes endpoint is set in output
      set_fact:
        output: "{{ output | combine({'kubernetes_endpoint': kubernetes_elb_out.elb.dns_name}) }}"
    - name: Variables are printed
      debug:
        var: output
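    # A successful run prints something like (values illustrative):
    #
    # output:
    #   vpc_id: vpc-0123456789abcdef0
    #   vpc_public_subnets: [...]
    #   vpc_kubernetes_subnets: [...]
    #   etcd_endpoint: internal-etcd-1234567890.us-east-1.elb.amazonaws.com
    #   kubernetes_endpoint: internal-kubernetes-1234567890.us-east-1.elb.amazonaws.com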