From 818642fc94632d01da31c49f040c7a4d40b41c99 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 17 Oct 2016 15:18:33 -0400 Subject: [PATCH 1/2] start of nested stacks --- inventory/aws/ec2.py | 289 ++++++++++++++---- .../files/nested/control-plane/default.yaml | 229 ++++++++++++++ .../files/nested/iam-profiles/default.yaml | 65 ++++ .../files/nested/master-user-data.yaml | 25 ++ .../files/nested/security-groups/default.yaml | 285 +++++++++++++++++ ..._master_infra_asg_node_asg_no_bastion.yaml | 237 ++++++++++++++ .../files/nested/vpc/default.yaml | 215 +++++++++++++ .../lookup_plugins/ec2_zones_by_region.py | 22 +- .../aws/cloudformation/nested_setup.yml | 45 +++ 9 files changed, 1353 insertions(+), 59 deletions(-) create mode 100644 playbooks/provisioning/aws/cloudformation/files/nested/control-plane/default.yaml create mode 100644 playbooks/provisioning/aws/cloudformation/files/nested/iam-profiles/default.yaml create mode 100644 playbooks/provisioning/aws/cloudformation/files/nested/master-user-data.yaml create mode 100644 playbooks/provisioning/aws/cloudformation/files/nested/security-groups/default.yaml create mode 100644 playbooks/provisioning/aws/cloudformation/files/nested/three_master_infra_asg_node_asg_no_bastion.yaml create mode 100644 playbooks/provisioning/aws/cloudformation/files/nested/vpc/default.yaml create mode 100644 playbooks/provisioning/aws/cloudformation/nested_setup.yml diff --git a/inventory/aws/ec2.py b/inventory/aws/ec2.py index 8b878cafd..3def6037a 100755 --- a/inventory/aws/ec2.py +++ b/inventory/aws/ec2.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python ''' EC2 external inventory script @@ -37,6 +37,7 @@ - ec2_attachTime - ec2_attachment - ec2_attachmentId + - ec2_block_devices - ec2_client_token - ec2_deleteOnTermination - ec2_description @@ -131,6 +132,15 @@ from boto import route53 import six +from ansible.module_utils import ec2 as ec2_utils + +HAS_BOTO3 = False +try: + import boto3 + HAS_BOTO3 = True +except 
ImportError: + pass + from six.moves import configparser from collections import defaultdict @@ -141,6 +151,7 @@ class Ec2Inventory(object): + def _empty_inventory(self): return {"_meta" : {"hostvars" : {}}} @@ -157,6 +168,9 @@ def __init__(self): # Boto profile to use (if any) self.boto_profile = None + # AWS credentials. + self.credentials = {} + # Read settings and parse CLI arguments self.parse_cli_args() self.read_settings() @@ -224,7 +238,7 @@ def read_settings(self): configRegions_exclude = config.get('ec2', 'regions_exclude') if (configRegions == 'all'): if self.eucalyptus_host: - self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) + self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials) else: for regionInfo in ec2.regions(): if regionInfo.name not in configRegions_exclude: @@ -236,6 +250,11 @@ def read_settings(self): self.destination_variable = config.get('ec2', 'destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') + if config.has_option('ec2', 'hostname_variable'): + self.hostname_variable = config.get('ec2', 'hostname_variable') + else: + self.hostname_variable = None + if config.has_option('ec2', 'destination_format') and \ config.has_option('ec2', 'destination_format_tags'): self.destination_format = config.get('ec2', 'destination_format') @@ -256,6 +275,12 @@ def read_settings(self): if config.has_option('ec2', 'rds'): self.rds_enabled = config.getboolean('ec2', 'rds') + # Include RDS cluster instances? + if config.has_option('ec2', 'include_rds_clusters'): + self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters') + else: + self.include_rds_clusters = False + # Include ElastiCache instances? 
self.elasticache_enabled = True if config.has_option('ec2', 'elasticache'): @@ -318,6 +343,29 @@ def read_settings(self): if config.has_option('ec2', 'boto_profile') and not self.boto_profile: self.boto_profile = config.get('ec2', 'boto_profile') + # AWS credentials (prefer environment variables) + if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or + os.environ.get('AWS_PROFILE')): + if config.has_option('credentials', 'aws_access_key_id'): + aws_access_key_id = config.get('credentials', 'aws_access_key_id') + else: + aws_access_key_id = None + if config.has_option('credentials', 'aws_secret_access_key'): + aws_secret_access_key = config.get('credentials', 'aws_secret_access_key') + else: + aws_secret_access_key = None + if config.has_option('credentials', 'aws_security_token'): + aws_security_token = config.get('credentials', 'aws_security_token') + else: + aws_security_token = None + if aws_access_key_id: + self.credentials = { + 'aws_access_key_id': aws_access_key_id, + 'aws_secret_access_key': aws_secret_access_key + } + if aws_security_token: + self.credentials['security_token'] = aws_security_token + # Cache related cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) if self.boto_profile: @@ -325,10 +373,22 @@ def read_settings(self): if not os.path.exists(cache_dir): os.makedirs(cache_dir) - self.cache_path_cache = cache_dir + "/ansible-ec2.cache" - self.cache_path_index = cache_dir + "/ansible-ec2.index" + cache_name = 'ansible-ec2' + aws_profile = lambda: (self.boto_profile or + os.environ.get('AWS_PROFILE') or + os.environ.get('AWS_ACCESS_KEY_ID') or + self.credentials.get('aws_access_key_id', None)) + if aws_profile(): + cache_name = '%s-%s' % (cache_name, aws_profile()) + self.cache_path_cache = cache_dir + "/%s.cache" % cache_name + self.cache_path_index = cache_dir + "/%s.index" % cache_name self.cache_max_age = config.getint('ec2', 'cache_max_age') + if config.has_option('ec2', 'expand_csv_tags'): + self.expand_csv_tags = 
config.getboolean('ec2', 'expand_csv_tags') + else: + self.expand_csv_tags = False + # Configure nested groups instead of flat namespace. if config.has_option('ec2', 'nested_groups'): self.nested_groups = config.getboolean('ec2', 'nested_groups') @@ -390,7 +450,10 @@ def read_settings(self): # Instance filters (see boto and EC2 API docs). Ignore invalid filters. self.ec2_instance_filters = defaultdict(list) if config.has_option('ec2', 'instance_filters'): - for instance_filter in config.get('ec2', 'instance_filters', '').split(','): + + filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f] + + for instance_filter in filters: instance_filter = instance_filter.strip() if not instance_filter or '=' not in instance_filter: continue @@ -409,7 +472,7 @@ def parse_cli_args(self): help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') - parser.add_argument('--boto-profile', action='store', + parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', help='Use boto profile for connections to EC2') self.args = parser.parse_args() @@ -427,6 +490,8 @@ def do_api_calls_update_cache(self): if self.elasticache_enabled: self.get_elasticache_clusters_by_region(region) self.get_elasticache_replication_groups_by_region(region) + if self.include_rds_clusters: + self.include_rds_clusters_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) @@ -434,7 +499,7 @@ def do_api_calls_update_cache(self): def connect(self, region): ''' create connection to api server''' if self.eucalyptus: - conn = boto.connect_euca(host=self.eucalyptus_host) + conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials) conn.APIVersion = '2010-08-31' else: conn = self.connect_to_aws(ec2, region) @@ 
-448,7 +513,7 @@ def boto_fix_security_token_in_profile(self, connect_args): return connect_args def connect_to_aws(self, module, region): - connect_args = {} + connect_args = self.credentials # only pass the profile name if it's set (as it is not supported by older boto versions) if self.boto_profile: @@ -474,15 +539,32 @@ def get_instances_by_region(self, region): else: reservations = conn.get_all_instances() + # Pull the tags back in a second step + # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not + # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags` + instance_ids = [] + for reservation in reservations: + instance_ids.extend([instance.id for instance in reservation.instances]) + + max_filter_value = 199 + tags = [] + for i in range(0, len(instance_ids), max_filter_value): + tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]})) + + tags_by_instance_id = defaultdict(dict) + for tag in tags: + tags_by_instance_id[tag.res_id][tag.name] = tag.value + for reservation in reservations: for instance in reservation.instances: + instance.tags = tags_by_instance_id[instance.id] self.add_instance(instance, region) except boto.exception.BotoServerError as e: if e.error_code == 'AuthFailure': error = self.get_auth_error_message() else: - backend = 'Eucalyptus' if self.eucalyptus else 'AWS' + backend = 'Eucalyptus' if self.eucalyptus else 'AWS' error = "Error connecting to %s backend.\n%s" % (backend, e.message) self.fail_with_error(error, 'getting EC2 instances') @@ -493,9 +575,14 @@ def get_rds_instances_by_region(self, region): try: conn = self.connect_to_aws(rds, region) if conn: - instances = conn.get_all_dbinstances() - for instance in instances: - self.add_rds_instance(instance, region) + marker = None + while True: + instances = conn.get_all_dbinstances(marker=marker) + marker = instances.marker + 
for instance in instances: + self.add_rds_instance(instance, region) + if not marker: + break except boto.exception.BotoServerError as e: error = e.reason @@ -505,6 +592,65 @@ def get_rds_instances_by_region(self, region): error = "Looks like AWS RDS is down:\n%s" % e.message self.fail_with_error(error, 'getting RDS instances') + def include_rds_clusters_by_region(self, region): + if not HAS_BOTO3: + self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again", + "getting RDS clusters") + + client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) + + marker, clusters = '', [] + while marker is not None: + resp = client.describe_db_clusters(Marker=marker) + clusters.extend(resp["DBClusters"]) + marker = resp.get('Marker', None) + + account_id = boto.connect_iam().get_user().arn.split(':')[4] + c_dict = {} + for c in clusters: + # remove these datetime objects as there is no serialisation to json + # currently in place and we don't need the data yet + if 'EarliestRestorableTime' in c: + del c['EarliestRestorableTime'] + if 'LatestRestorableTime' in c: + del c['LatestRestorableTime'] + + if self.ec2_instance_filters == {}: + matches_filter = True + else: + matches_filter = False + + try: + # arn:aws:rds:::: + tags = client.list_tags_for_resource( + ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier']) + c['Tags'] = tags['TagList'] + + if self.ec2_instance_filters: + for filter_key, filter_values in self.ec2_instance_filters.items(): + # get AWS tag key e.g. 
tag:env will be 'env' + tag_name = filter_key.split(":", 1)[1] + # Filter values is a list (if you put multiple values for the same tag name) + matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags']) + + if matches_filter: + # it matches a filter, so stop looking for further matches + break + + except Exception as e: + if e.message.find('DBInstanceNotFound') >= 0: + # AWS RDS bug (2016-01-06) means deletion does not fully complete and leave an 'empty' cluster. + # Ignore errors when trying to find tags for these + pass + + # ignore empty clusters caused by AWS bug + if len(c['DBClusterMembers']) == 0: + continue + elif matches_filter: + c_dict[c['DBClusterIdentifier']] = c + + self.inventory['db_clusters'] = c_dict + def get_elasticache_clusters_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache clusters (with nodes' info) in a particular region.''' @@ -513,7 +659,7 @@ def get_elasticache_clusters_by_region(self, region): # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: - conn = elasticache.connect_to_region(region) + conn = self.connect_to_aws(elasticache, region) if conn: # show_cache_node_info = True # because we also want nodes' information @@ -549,7 +695,7 @@ def get_elasticache_replication_groups_by_region(self, region): # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) 
try: - conn = elasticache.connect_to_region(region) + conn = self.connect_to_aws(elasticache, region) if conn: response = conn.describe_replication_groups() @@ -618,7 +764,7 @@ def add_instance(self, instance, region): # Select the best destination address if self.destination_format and self.destination_format_tags: - dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ]) + dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ]) elif instance.subnet_id: dest = getattr(instance, self.vpc_destination_variable, None) if dest is None: @@ -632,32 +778,46 @@ def add_instance(self, instance, region): # Skip instances we cannot address (e.g. private VPC subnet) return + # Set the inventory name + hostname = None + if self.hostname_variable: + if self.hostname_variable.startswith('tag_'): + hostname = instance.tags.get(self.hostname_variable[4:], None) + else: + hostname = getattr(instance, self.hostname_variable) + + # If we can't get a nice hostname, use the destination address + if not hostname: + hostname = dest + else: + hostname = self.to_safe(hostname).lower() + # if we only want to include hosts that match a pattern, skip those that don't - if self.pattern_include and not self.pattern_include.match(dest): + if self.pattern_include and not self.pattern_include.match(hostname): return # if we need to exclude hosts that match a pattern, skip those - if self.pattern_exclude and self.pattern_exclude.match(dest): + if self.pattern_exclude and self.pattern_exclude.match(hostname): return # Add to index - self.index[dest] = [region, instance.id] + self.index[hostname] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: - self.inventory[instance.id] = [dest] + self.inventory[instance.id] = [hostname] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # 
Inventory: Group by region if self.group_by_region: - self.push(self.inventory, region, dest) + self.push(self.inventory, region, hostname) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: - self.push(self.inventory, instance.placement, dest) + self.push(self.inventory, instance.placement, hostname) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.placement) @@ -666,28 +826,28 @@ def add_instance(self, instance, region): # Inventory: Group by Amazon Machine Image (AMI) ID if self.group_by_ami_id: ami_id = self.to_safe(instance.image_id) - self.push(self.inventory, ami_id, dest) + self.push(self.inventory, ami_id, hostname) if self.nested_groups: self.push_group(self.inventory, 'images', ami_id) # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_type) - self.push(self.inventory, type_name, dest) + self.push(self.inventory, type_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by key pair if self.group_by_key_pair and instance.key_name: key_name = self.to_safe('key_' + instance.key_name) - self.push(self.inventory, key_name, dest) + self.push(self.inventory, key_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'keys', key_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) - self.push(self.inventory, vpc_id_name, dest) + self.push(self.inventory, vpc_id_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) @@ -696,44 +856,51 @@ def add_instance(self, instance, region): try: for group in instance.groups: key = self.to_safe("security_group_" + group.name) - self.push(self.inventory, key, dest) + self.push(self.inventory, key, hostname) if 
self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: - self.fail_with_error('\n'.join(['Package boto seems a bit older.', + self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by tag keys if self.group_by_tag_keys: for k, v in instance.tags.items(): - if v: - key = self.to_safe("tag_" + k + "=" + v) + if self.expand_csv_tags and v and ',' in v: + values = map(lambda x: x.strip(), v.split(',')) else: - key = self.to_safe("tag_" + k) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) + values = [v] + + for v in values: if v: - self.push_group(self.inventory, self.to_safe("tag_" + k), key) + key = self.to_safe("tag_" + k + "=" + v) + else: + key = self.to_safe("tag_" + k) + self.push(self.inventory, key, hostname) + if self.nested_groups: + self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) + if v: + self.push_group(self.inventory, self.to_safe("tag_" + k), key) # Inventory: Group by Route53 domain names if enabled if self.route53_enabled and self.group_by_route53_names: route53_names = self.get_instance_route53_names(instance) for name in route53_names: - self.push(self.inventory, name, dest) + self.push(self.inventory, name, hostname) if self.nested_groups: self.push_group(self.inventory, 'route53', name) # Global Tag: instances without tags if self.group_by_tag_none and len(instance.tags) == 0: - self.push(self.inventory, 'tag_none', dest) + self.push(self.inventory, 'tag_none', hostname) if self.nested_groups: self.push_group(self.inventory, 'tags', 'tag_none') # Global Tag: tag all EC2 instances - self.push(self.inventory, 'ec2', dest) + self.push(self.inventory, 'ec2', hostname) - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname] = 
self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest def add_rds_instance(self, instance, region): @@ -751,24 +918,38 @@ def add_rds_instance(self, instance, region): # Skip instances we cannot address (e.g. private VPC subnet) return + # Set the inventory name + hostname = None + if self.hostname_variable: + if self.hostname_variable.startswith('tag_'): + hostname = instance.tags.get(self.hostname_variable[4:], None) + else: + hostname = getattr(instance, self.hostname_variable) + + # If we can't get a nice hostname, use the destination address + if not hostname: + hostname = dest + + hostname = self.to_safe(hostname).lower() + # Add to index - self.index[dest] = [region, instance.id] + self.index[hostname] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: - self.inventory[instance.id] = [dest] + self.inventory[instance.id] = [hostname] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: - self.push(self.inventory, region, dest) + self.push(self.inventory, region, hostname) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: - self.push(self.inventory, instance.availability_zone, dest) + self.push(self.inventory, instance.availability_zone, hostname) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.availability_zone) @@ -777,14 +958,14 @@ def add_rds_instance(self, instance, region): # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_class) - self.push(self.inventory, type_name, dest) + self.push(self.inventory, type_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC if 
self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) - self.push(self.inventory, vpc_id_name, dest) + self.push(self.inventory, vpc_id_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) @@ -793,31 +974,32 @@ def add_rds_instance(self, instance, region): try: if instance.security_group: key = self.to_safe("security_group_" + instance.security_group.name) - self.push(self.inventory, key, dest) + self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: - self.fail_with_error('\n'.join(['Package boto seems a bit older.', + self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by engine if self.group_by_rds_engine: - self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) + self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname) if self.nested_groups: self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) # Inventory: Group by parameter group if self.group_by_rds_parameter_group: - self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) + self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname) if self.nested_groups: self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) # Global Tag: all RDS instances - self.push(self.inventory, 'rds', dest) + self.push(self.inventory, 'rds', hostname) - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest def 
add_elasticache_cluster(self, cluster, region): ''' Adds an ElastiCache cluster to the inventory and index, as long as @@ -1130,6 +1312,8 @@ def get_host_info_dict_from_instance(self, instance): instance_vars['ec2_placement'] = value.zone elif key == 'ec2_tags': for k, v in value.items(): + if self.expand_csv_tags and ',' in v: + v = map(lambda x: x.strip(), v.split(',')) key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': @@ -1140,6 +1324,10 @@ def get_host_info_dict_from_instance(self, instance): group_names.append(group.name) instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) + elif key == 'ec2_block_device_mapping': + instance_vars["ec2_block_devices"] = {} + for k, v in value.items(): + instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id else: pass # TODO Product codes if someone finds them useful @@ -1320,4 +1508,3 @@ def json_format_dict(self, data, pretty=False): # Run the script Ec2Inventory() - diff --git a/playbooks/provisioning/aws/cloudformation/files/nested/control-plane/default.yaml b/playbooks/provisioning/aws/cloudformation/files/nested/control-plane/default.yaml new file mode 100644 index 000000000..545eb84bd --- /dev/null +++ b/playbooks/provisioning/aws/cloudformation/files/nested/control-plane/default.yaml @@ -0,0 +1,229 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: OpenShift Integrated Control Plane +Parameters: + MasterSGs: + Type: 'List' + MasterExtElbSGs: + Type: 'List' + MasterIntElbSGs: + Type: 'List' + MasterInstanceType: + Type: String + Default: t2.medium + MasterImageId: + Type: 'AWS::EC2::Image::Id' + Default: ami-10251c7a + MasterInstanceProfile: + Type: String + KeyName: + Type: 'AWS::EC2::KeyPair::KeyName' + Master01Subnet: + Type: 'AWS::EC2::Subnet::Id' + Master02Subnet: + Type: 'AWS::EC2::Subnet::Id' + Master03Subnet: + Type: 'AWS::EC2::Subnet::Id' + 
MasterApiPort: + Type: Number + Default: 443 + MasterRootVolSize: + Type: String + Default: 10 + MasterDockerVolSize: + Type: String + Default: 25 + MasterEtcdVolSize: + Type: String + Default: 25 + MasterUserData: + Type: String + +Conditions: + SetMasterInstanceProfile: + 'Fn::Not': + - 'Fn::Equals': + - '' + - Ref: MasterInstanceProfile + +Resources: + Master01: + Type: 'AWS::EC2::Instance' + Properties: + ImageId: + Ref: MasterImageId + KeyName: + Ref: KeyName + InstanceType: + Ref: MasterInstanceType + SecurityGroupIds: + Ref: MasterSGs + SubnetId: + Ref: Master01Subnet + IamInstanceProfile: + Ref: MasterInstanceProfile + BlockDeviceMappings: + - DeviceName: /dev/sda1 + Ebs: + DeleteOnTermination: True + VolumeSize: + Ref: MasterRootVolSize + VolumeType: gp2 + - DeviceName: /dev/xvdb + Ebs: + DeleteOnTermination: True + VolumeSize: + Ref: MasterDockerVolSize + VolumeType: gp2 + - DeviceName: /dev/xvdc + Ebs: + DeleteOnTermination: True + VolumeSize: + Ref: MasterEtcdVolSize + VolumeType: gp2 + UserData: + Ref: MasterUserData + Master02: + Type: 'AWS::EC2::Instance' + Properties: + ImageId: + Ref: MasterImageId + KeyName: + Ref: KeyName + InstanceType: + Ref: MasterInstanceType + SecurityGroupIds: + Ref: MasterSGs + SubnetId: + Ref: Master02Subnet + IamInstanceProfile: + Ref: MasterInstanceProfile + BlockDeviceMappings: + - DeviceName: /dev/sda1 + Ebs: + DeleteOnTermination: True + VolumeSize: + Ref: MasterRootVolSize + VolumeType: gp2 + - DeviceName: /dev/xvdb + Ebs: + DeleteOnTermination: True + VolumeSize: + Ref: MasterDockerVolSize + VolumeType: gp2 + - DeviceName: /dev/xvdc + Ebs: + DeleteOnTermination: True + VolumeSize: + Ref: MasterEtcdVolSize + VolumeType: gp2 + UserData: + Ref: MasterUserData + Master03: + Type: 'AWS::EC2::Instance' + Properties: + ImageId: + Ref: MasterImageId + KeyName: + Ref: KeyName + InstanceType: + Ref: MasterInstanceType + SecurityGroupIds: + Ref: MasterSGs + SubnetId: + Ref: Master03Subnet + IamInstanceProfile: + Ref: 
MasterInstanceProfile + BlockDeviceMappings: + - DeviceName: /dev/sda1 + Ebs: + DeleteOnTermination: True + VolumeSize: + Ref: MasterRootVolSize + VolumeType: gp2 + - DeviceName: /dev/xvdb + Ebs: + DeleteOnTermination: True + VolumeSize: + Ref: MasterDockerVolSize + VolumeType: gp2 + - DeviceName: /dev/xvdc + Ebs: + DeleteOnTermination: True + VolumeSize: + Ref: MasterEtcdVolSize + VolumeType: gp2 + UserData: + Ref: MasterUserData + MasterIntElb: + Type: 'AWS::ElasticLoadBalancing::LoadBalancer' + Properties: + CrossZone: True + ConnectionSettings: + IdleTimeout: 3600 + Listeners: + - InstancePort: + Ref: MasterApiPort + InstanceProtocol: TCP + LoadBalancerPort: + Ref: MasterApiPort + Protocol: TCP + Scheme: internal + SecurityGroups: + Ref: MasterIntElbSGs + Subnets: + - Ref: Master01Subnet + - Ref: Master02Subnet + - Ref: Master03Subnet + Instances: + - Ref: Master01 + - Ref: Master02 + - Ref: Master03 + HealthCheck: + HealthyThreshold: 2 + Interval: 5 + Timeout: 2 + UnhealthyThreshold: 2 + Target: + 'Fn::Join': + - '' + - - 'Fn::Join': + - ':' + - - 'HTTPS' + - Ref: MasterApiPort + - /healthz/ready + MasterExtElb: + Type: 'AWS::ElasticLoadBalancing::LoadBalancer' + Properties: + CrossZone: True + ConnectionSettings: + IdleTimeout: 3600 + Listeners: + - InstancePort: + Ref: MasterApiPort + InstanceProtocol: TCP + LoadBalancerPort: + Ref: MasterApiPort + Protocol: TCP + SecurityGroups: + Ref: MasterExtElbSGs + Subnets: + - Ref: Master01Subnet + - Ref: Master02Subnet + - Ref: Master03Subnet + Instances: + - Ref: Master01 + - Ref: Master02 + - Ref: Master03 + HealthCheck: + HealthyThreshold: 2 + Interval: 5 + Timeout: 2 + UnhealthyThreshold: 2 + Target: + 'Fn::Join': + - '' + - - 'Fn::Join': + - ':' + - - 'HTTPS' + - Ref: MasterApiPort + - /healthz/ready diff --git a/playbooks/provisioning/aws/cloudformation/files/nested/iam-profiles/default.yaml b/playbooks/provisioning/aws/cloudformation/files/nested/iam-profiles/default.yaml new file mode 100644 index 
000000000..e2ce8b560 --- /dev/null +++ b/playbooks/provisioning/aws/cloudformation/files/nested/iam-profiles/default.yaml @@ -0,0 +1,65 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: OpenShift IAM Profiles +Outputs: + MasterInstanceProfile: + Description: Master Instance Profile + Value: + Ref: MasterInstanceProfile + NodeInstanceProfile: + Description: Node Instance Profile + Value: + Ref: NodeInstanceProfile +Resources: + MasterPolicy: + Type: 'AWS::IAM::Role' + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: + - ec2.amazonaws.com + Action: + - 'sts:AssumeRole' + Policies: + - PolicyName: openshift-master + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: 'ec2:*' + Resource: '*' + - Effect: Allow + Action: 'elasticloadbalancing:*' + Resource: '*' + NodePolicy: + Type: 'AWS::IAM::Role' + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: + - ec2.amazonaws.com + Action: + - 'sts:AssumeRole' + Policies: + - PolicyName: openshift-node + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: 'ec2:Describe*' + Resource: '*' + MasterInstanceProfile: + Type: 'AWS::IAM::InstanceProfile' + Properties: + Roles: + - Ref: MasterPolicy + NodeInstanceProfile: + Type: 'AWS::IAM::InstanceProfile' + Properties: + Roles: + - Ref: NodePolicy diff --git a/playbooks/provisioning/aws/cloudformation/files/nested/master-user-data.yaml b/playbooks/provisioning/aws/cloudformation/files/nested/master-user-data.yaml new file mode 100644 index 000000000..72efa4558 --- /dev/null +++ b/playbooks/provisioning/aws/cloudformation/files/nested/master-user-data.yaml @@ -0,0 +1,25 @@ +#cloud-config +cloud_config_modules: +- disk_setup +- mounts + +fs_setup: +- label: etcd_storage + filesystem: xfs + device: /dev/xdc + partition: auto + +runcmd: +- mkdir -p /var/lib/etcd + +mounts: +- [ /dev/xvdc, 
/var/lib/etcd, xfs, 'defaults' ] + +write_files: +- content: | + DEVS='/dev/xvdb' + VG=docker_vol + DATA_SIZE=95%VG + EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize=3G" + path: /etc/sysconfig/docker-storage-setup + owner: root:root diff --git a/playbooks/provisioning/aws/cloudformation/files/nested/security-groups/default.yaml b/playbooks/provisioning/aws/cloudformation/files/nested/security-groups/default.yaml new file mode 100644 index 000000000..622bb79c8 --- /dev/null +++ b/playbooks/provisioning/aws/cloudformation/files/nested/security-groups/default.yaml @@ -0,0 +1,285 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: OpenShift Cluster Template +Parameters: + VpcId: + Type: "AWS::EC2::VPC::Id" + MasterApiPort: + Type: Number + Default: 443 +Outputs: + MasterSecurityGroups: + Description: Master Security Groups + Value: + 'Fn::Join': + - ',' + - - Ref: SshSG + - Ref: EtcdSG + - Ref: MasterSG + - Ref: NodeSG + NodeSecurityGroups: + Description: Node Security Groups + Value: + 'Fn::Join': + - ',' + - - Ref: SshSG + - Ref: NodeSG + EtcdSecurityGroups: + Description: etcd Security Groups + Value: + 'Fn::Join': + - ',' + - - Ref: SshSG + - Ref: EtcdSG + RouterSecurityGroups: + Description: Router Security Groups + Value: + 'Fn::Join': + - ',' + - - Ref: SshSG + - Ref: NodeSG + - Ref: RouterSG + RouterElbSecurityGroups: + Description: Router ELB Security Groups + Value: + Ref: RouterElbSG + MasterExtElbSecurityGroups: + Description: Master External ELB Security Groups + Value: + Ref: MasterExtElbSG + MasterIntElbSecurityGroups: + Description: Master Internal ELB Security Groups + Value: + Ref: MasterIntElbSG +Resources: + SshSG: + Type: 'AWS::EC2::SecurityGroup' + Properties: + GroupDescription: SSH Access + VpcId: + Ref: VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: 0.0.0.0/0 + MasterSG: + Type: 'AWS::EC2::SecurityGroup' + Properties: + GroupDescription: OpenShift Masters + VpcId: + Ref: VpcId + EtcdSG: + 
Type: 'AWS::EC2::SecurityGroup' + DependsOn: + - MasterSG + Properties: + GroupDescription: OpenShift etcd + VpcId: + Ref: VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 2379 + ToPort: 2379 + SourceSecurityGroupId: + Ref: MasterSG + NodeSG: + Type: 'AWS::EC2::SecurityGroup' + DependsOn: + - MasterSG + Properties: + GroupDescription: OpenShift Nodes + VpcId: + Ref: VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 10250 + ToPort: 10250 + SourceSecurityGroupId: + Ref: MasterSG + RouterSG: + Type: 'AWS::EC2::SecurityGroup' + Properties: + GroupDescription: OpenShift Router + VpcId: + Ref: VpcId + MasterExtElbSG: + Type: 'AWS::EC2::SecurityGroup' + DependsOn: + - MasterSG + Properties: + GroupDescription: OpenShift Master External ELB + VpcId: + Ref: VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: + Ref: MasterApiPort + ToPort: + Ref: MasterApiPort + CidrIp: 0.0.0.0/0 + SecurityGroupEgress: + - IpProtocol: tcp + FromPort: + Ref: MasterApiPort + ToPort: + Ref: MasterApiPort + DestinationSecurityGroupId: + Ref: MasterSG + MasterIntElbSG: + Type: 'AWS::EC2::SecurityGroup' + DependsOn: + - MasterSG + - NodeSG + Properties: + GroupDescription: OpenShift Master Internal ELB + VpcId: + Ref: VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: + Ref: MasterApiPort + ToPort: + Ref: MasterApiPort + SourceSecurityGroupId: + Ref: MasterSG + - IpProtocol: tcp + FromPort: + Ref: MasterApiPort + ToPort: + Ref: MasterApiPort + SourceSecurityGroupId: + Ref: NodeSG + SecurityGroupEgress: + - IpProtocol: tcp + FromPort: + Ref: MasterApiPort + ToPort: + Ref: MasterApiPort + DestinationSecurityGroupId: + Ref: MasterSG + RouterElbSG: + Type: 'AWS::EC2::SecurityGroup' + DependsOn: + - RouterSG + Properties: + GroupDescription: OpenShift Router ELB + VpcId: + Ref: VpcId + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 80 + ToPort: 80 + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: 443 + ToPort: 443 + CidrIp: 0.0.0.0/0 + 
SecurityGroupEgress: + - IpProtocol: tcp + FromPort: 80 + ToPort: 80 + DestinationSecurityGroupId: + Ref: RouterSG + - IpProtocol: tcp + FromPort: 443 + ToPort: 443 + DestinationSecurityGroupId: + Ref: RouterSG + RouterIngressHTTP: + Type: 'AWS::EC2::SecurityGroupIngress' + Properties: + GroupId: + Ref: RouterSG + IpProtocol: tcp + FromPort: 80 + ToPort: 80 + SourceSecurityGroupId: + Ref: RouterElbSG + RouterIngressHTTPS: + Type: 'AWS::EC2::SecurityGroupIngress' + Properties: + GroupId: + Ref: RouterSG + IpProtocol: tcp + FromPort: 443 + ToPort: 443 + SourceSecurityGroupId: + Ref: RouterElbSG + NodeIngressNodeKubelet: + Type: 'AWS::EC2::SecurityGroupIngress' + Properties: + GroupId: + Ref: NodeSG + IpProtocol: tcp + FromPort: 10250 + ToPort: 10250 + SourceSecurityGroupId: + Ref: NodeSG + NodeIngressNodeVXLAN: + Type: 'AWS::EC2::SecurityGroupIngress' + Properties: + GroupId: + Ref: NodeSG + IpProtocol: udp + FromPort: 4789 + ToPort: 4789 + SourceSecurityGroupId: + Ref: NodeSG + MasterIngressIntLB: + Type: 'AWS::EC2::SecurityGroupIngress' + Properties: + GroupId: + Ref: MasterSG + IpProtocol: tcp + FromPort: + Ref: MasterApiPort + ToPort: + Ref: MasterApiPort + SourceSecurityGroupId: + Ref: MasterIntElbSG + MasterIngressExtLB: + Type: 'AWS::EC2::SecurityGroupIngress' + Properties: + GroupId: + Ref: MasterSG + IpProtocol: tcp + FromPort: + Ref: MasterApiPort + ToPort: + Ref: MasterApiPort + SourceSecurityGroupId: + Ref: MasterExtElbSG + MasterIngressNodesApi: + Type: 'AWS::EC2::SecurityGroupIngress' + Properties: + GroupId: + Ref: MasterSG + IpProtocol: tcp + FromPort: + Ref: MasterApiPort + ToPort: + Ref: MasterApiPort + SourceSecurityGroupId: + Ref: NodeSG + MasterIngressMasterApi: + Type: 'AWS::EC2::SecurityGroupIngress' + Properties: + GroupId: + Ref: MasterSG + IpProtocol: tcp + FromPort: + Ref: MasterApiPort + ToPort: + Ref: MasterApiPort + SourceSecurityGroupId: + Ref: MasterSG + EtcdIngressEtcd: + Type: 'AWS::EC2::SecurityGroupIngress' + Properties: + GroupId: 
+ Ref: EtcdSG + IpProtocol: tcp + FromPort: 2379 + ToPort: 2380 + SourceSecurityGroupId: + Ref: EtcdSG diff --git a/playbooks/provisioning/aws/cloudformation/files/nested/three_master_infra_asg_node_asg_no_bastion.yaml b/playbooks/provisioning/aws/cloudformation/files/nested/three_master_infra_asg_node_asg_no_bastion.yaml new file mode 100644 index 000000000..871050978 --- /dev/null +++ b/playbooks/provisioning/aws/cloudformation/files/nested/three_master_infra_asg_node_asg_no_bastion.yaml @@ -0,0 +1,237 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: OpenShift Cluster Template +Parameters: + NamePrefix: + Type: String + Default: openshift_ + MasterApiPort: + Type: Number + Default: 443 + KeyName: + Type: 'AWS::EC2::KeyPair::KeyName' + + + VpcId: + Type: String + VpcTemplateUrl: + Type: String + Default: 'https://s3.amazonaws.com/openshift-cloudformation-templates/vpc/default.yaml' + NumSubnets: + Type: Number + MinValue: 1 + MaxValue: 4 + Default: 1 + SubnetAvailabilityZones: + Type: 'List' + SubnetCidrBlocks: + Type: CommaDelimitedList + Default: '172.18.0.0/24,172.18.1.0/24,172.18.2.0/24,172.18.3.0/24' + VpcCidrBlock: + Type: String + Default: 172.18.0.0/16 + + + MasterExtElbSecurityGroups: + Type: CommaDelimitedList + MasterIntElbSecurityGroups: + Type: CommaDelimitedList + RouterElbSecurityGroups: + Type: CommaDelimitedList + MasterSecurityGroups: + Type: CommaDelimitedList + EtcdSecurityGroups: + Type: CommaDelimitedList + NodeSecurityGroups: + Type: CommaDelimitedList + RouterSecurityGroups: + Type: CommaDelimitedList + SecurityGroupTemplateUrl: + Type: String + Default: 'https://s3.amazonaws.com/openshift-cloudformation-templates/security-groups/default.yaml' + + + MasterInstanceProfile: + Type: String + NodeInstanceProfile: + Type: String + IamInstanceProfileTemplateUrl: + Type: String + Default: 'https://s3.amazonaws.com/openshift-cloudformation-templates/iam-profiles/default.yaml' + + MasterUserData: + Type: String + MasterInstanceSubnets: + 
Type: CommaDelimitedList + ControlPlaneTemplateUrl: + Type: String + Default: 'https://s3.amazonaws.com/openshift-cloudformation-templates/control-plane/default.yaml' + +Conditions: + CreateVpc: + 'Fn::Equals': + - '' + - Ref: VpcId + CreateSecurityGroups: + 'Fn::Or': + - 'Fn::Equals': + - '' + - 'Fn::Join': + - ',' + - Ref: MasterSecurityGroups + - 'Fn::Equals': + - '' + - 'Fn::Join': + - ',' + - Ref: NodeSecurityGroups + - 'Fn::Equals': + - '' + - 'Fn::Join': + - ',' + - Ref: RouterSecurityGroups + - 'Fn::Equals': + - '' + - 'Fn::Join': + - ',' + - Ref: EtcdSecurityGroups + - 'Fn::Equals': + - '' + - 'Fn::Join': + - ',' + - Ref: RouterElbSecurityGroups + - 'Fn::Equals': + - '' + - 'Fn::Join': + - ',' + - Ref: MasterExtElbSecurityGroups + - 'Fn::Equals': + - '' + - 'Fn::Join': + - ',' + - Ref: MasterIntElbSecurityGroups + + CreateIAMProfiles: + 'Fn::Or': + - 'Fn::Equals': + - '' + - Ref: MasterInstanceProfile + - 'Fn::Equals': + - '' + - Ref: NodeInstanceProfile + +Resources: + VpcStack: + Type: 'AWS::CloudFormation::Stack' + Condition: CreateVpc + Properties: + TemplateURL: + Ref: VpcTemplateUrl + Parameters: + VpcName: + 'Fn::Join': + - '' + - - Ref: NamePrefix + - _vpc + NumSubnets: + Ref: NumSubnets + SubnetAvailabilityZones: + 'Fn::Join': + - ',' + - Ref: SubnetAvailabilityZones + SubnetCidrBlocks: + 'Fn::Join': + - ',' + - Ref: SubnetCidrBlocks + VpcCidrBlock: + Ref: VpcCidrBlock + SGStack: + Type: 'AWS::CloudFormation::Stack' + Condition: CreateSecurityGroups + Properties: + TemplateURL: + Ref: SecurityGroupTemplateUrl + Parameters: + VpcId: + 'Fn::If': + - CreateVpc + - 'Fn::GetAtt': + - VpcStack + - Outputs.VpcId + - Ref: VpcId + MasterApiPort: + Ref: MasterApiPort + + IAMStack: + Type: 'AWS::CloudFormation::Stack' + Condition: CreateIAMProfiles + Properties: + TemplateURL: + Ref: IamInstanceProfileTemplateUrl + + ControlPlaneStack: + Type: 'AWS::CloudFormation::Stack' + Condition: CreateIAMProfiles + Properties: + TemplateURL: + Ref: 
ControlPlaneTemplateUrl + Parameters: + MasterSGs: + 'Fn::If': + - CreateSecurityGroups + - 'Fn::GetAtt': + - SGStack + - Outputs.MasterSecurityGroups + - Ref: MasterSecurityGroups + MasterExtElbSGs: + 'Fn::If': + - CreateSecurityGroups + - 'Fn::GetAtt': + - SGStack + - Outputs.MasterExtElbSecurityGroups + - Ref: MasterExtElbSecurityGroups + MasterIntElbSGs: + 'Fn::If': + - CreateSecurityGroups + - 'Fn::GetAtt': + - SGStack + - Outputs.MasterIntElbSecurityGroups + - Ref: MasterIntElbSecurityGroups + MasterInstanceProfile: + 'Fn::If': + - CreateIAMProfiles + - 'Fn::GetAtt': + - IAMStack + - Outputs.MasterInstanceProfile + - Ref: MasterInstanceProfile + MasterApiPort: + Ref: MasterApiPort + KeyName: + Ref: KeyName + MasterUserData: + Ref: MasterUserData + Master01Subnet: + 'Fn::If': + - CreateVpc + - 'Fn::GetAtt': + - VpcStack + - Outputs.VpcSubnet1 + - 'Fn::Select': + - 0 + - Ref: MasterInstanceSubnets + Master02Subnet: + 'Fn::If': + - CreateVpc + - 'Fn::GetAtt': + - VpcStack + - Outputs.VpcSubnet2 + - 'Fn::Select': + - 1 + - Ref: MasterInstanceSubnets + Master03Subnet: + 'Fn::If': + - CreateVpc + - 'Fn::GetAtt': + - VpcStack + - Outputs.VpcSubnet3 + - 'Fn::Select': + - 2 + - Ref: MasterInstanceSubnets diff --git a/playbooks/provisioning/aws/cloudformation/files/nested/vpc/default.yaml b/playbooks/provisioning/aws/cloudformation/files/nested/vpc/default.yaml new file mode 100644 index 000000000..e7c74d630 --- /dev/null +++ b/playbooks/provisioning/aws/cloudformation/files/nested/vpc/default.yaml @@ -0,0 +1,215 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: OpenShift VPC and Subnets Template +Outputs: + VpcId: + Description: Vpc Id + Value: + Ref: Vpc + VpcSubnets: + Description: VPC Subnets + Value: + 'Fn::Join': + - ',' + - - Ref: Subnet1 + - 'Fn::If': + - CreateSubnet2 + - Ref: Subnet2 + - Ref: 'AWS::NoValue' + - 'Fn::If': + - CreateSubnet3 + - Ref: Subnet3 + - Ref: 'AWS::NoValue' + - 'Fn::If': + - CreateSubnet4 + - Ref: Subnet4 + - Ref: 'AWS::NoValue' + 
VpcSubnet1: + Description: VPC Subnet01 + Value: + Ref: Subnet1 + VpcSubnet2: + Value: + 'Fn::If': + - CreateSubnet2 + - Ref: Subnet2 + - Ref: Subnet1 + VpcSubnet3: + Value: + 'Fn::If': + - CreateSubnet3 + - Ref: Subnet3 + - Ref: Subnet1 + VpcSubnet4: + Value: + 'Fn::If': + - CreateSubnet4 + - Ref: Subnet4 + - 'Fn::If': + - CreateSubnet2 + - Ref: Subnet2 + - Ref: Subnet1 +Parameters: + VpcName: + Type: String + VpcCidrBlock: + Type: String + NumSubnets: + Type: Number + MinValue: 1 + MaxValue: 4 + Default: 1 + SubnetAvailabilityZones: + Type: 'List' + SubnetCidrBlocks: + Type: CommaDelimitedList +Conditions: + CreateSubnet2: + 'Fn::Not': + - 'Fn::Equals': + - Ref: NumSubnets + - 1 + CreateSubnet3: + 'Fn::Or': + - 'Fn::Equals': + - Ref: NumSubnets + - 3 + - 'Fn::Equals': + - Ref: NumSubnets + - 4 + CreateSubnet4: + 'Fn::Equals': + - Ref: NumSubnets + - 4 +Resources: + Vpc: + Type: 'AWS::EC2::VPC' + Properties: + CidrBlock: + Ref: VpcCidrBlock + EnableDnsHostnames: True + EnableDnsSupport: True + Tags: + - Key: Name + Value: + Ref: VpcName + VpcInternetGateway: + Type: 'AWS::EC2::InternetGateway' + Properties: {} + VpcGA: + Type: 'AWS::EC2::VPCGatewayAttachment' + Properties: + InternetGatewayId: + Ref: VpcInternetGateway + VpcId: + Ref: Vpc + VpcRouteTable: + Properties: + VpcId: + Ref: Vpc + Type: 'AWS::EC2::RouteTable' + VPCRouteInternetGateway: + Properties: + DestinationCidrBlock: 0.0.0.0/0 + GatewayId: + Ref: VpcInternetGateway + RouteTableId: + Ref: VpcRouteTable + Type: 'AWS::EC2::Route' + Subnet1: + Type: 'AWS::EC2::Subnet' + DependsOn: + - Vpc + Properties: + AvailabilityZone: + 'Fn::Select': + - 0 + - Ref: SubnetAvailabilityZones + CidrBlock: + 'Fn::Select': + - 0 + - Ref: SubnetCidrBlocks + MapPublicIpOnLaunch: True + VpcId: + Ref: Vpc + Subnet1RTA: + Type: 'AWS::EC2::SubnetRouteTableAssociation' + Properties: + RouteTableId: + Ref: VpcRouteTable + SubnetId: + Ref: Subnet1 + Subnet2: + Type: 'AWS::EC2::Subnet' + Condition: CreateSubnet2 + DependsOn: + - 
Vpc + Properties: + AvailabilityZone: + 'Fn::Select': + - 1 + - Ref: SubnetAvailabilityZones + CidrBlock: + 'Fn::Select': + - 1 + - Ref: SubnetCidrBlocks + MapPublicIpOnLaunch: True + VpcId: + Ref: Vpc + Subnet2RTA: + Type: 'AWS::EC2::SubnetRouteTableAssociation' + Condition: CreateSubnet2 + Properties: + RouteTableId: + Ref: VpcRouteTable + SubnetId: + Ref: Subnet2 + Subnet3: + Type: 'AWS::EC2::Subnet' + Condition: CreateSubnet3 + DependsOn: + - Vpc + Properties: + AvailabilityZone: + 'Fn::Select': + - 2 + - Ref: SubnetAvailabilityZones + CidrBlock: + 'Fn::Select': + - 2 + - Ref: SubnetCidrBlocks + MapPublicIpOnLaunch: True + VpcId: + Ref: Vpc + Subnet3RTA: + Type: 'AWS::EC2::SubnetRouteTableAssociation' + Condition: CreateSubnet3 + Properties: + RouteTableId: + Ref: VpcRouteTable + SubnetId: + Ref: Subnet3 + Subnet4: + Type: 'AWS::EC2::Subnet' + Condition: CreateSubnet4 + DependsOn: + - Vpc + Properties: + AvailabilityZone: + 'Fn::Select': + - 3 + - Ref: SubnetAvailabilityZones + CidrBlock: + 'Fn::Select': + - 3 + - Ref: SubnetCidrBlocks + MapPublicIpOnLaunch: True + VpcId: + Ref: Vpc + Subnet4RTA: + Type: 'AWS::EC2::SubnetRouteTableAssociation' + Condition: CreateSubnet4 + Properties: + RouteTableId: + Ref: VpcRouteTable + SubnetId: + Ref: Subnet4 diff --git a/playbooks/provisioning/aws/cloudformation/lookup_plugins/ec2_zones_by_region.py b/playbooks/provisioning/aws/cloudformation/lookup_plugins/ec2_zones_by_region.py index 6b82e38e6..9987a3c3d 100644 --- a/playbooks/provisioning/aws/cloudformation/lookup_plugins/ec2_zones_by_region.py +++ b/playbooks/provisioning/aws/cloudformation/lookup_plugins/ec2_zones_by_region.py @@ -1,15 +1,21 @@ -from ansible import utils, errors +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 + +from ansible.plugins.lookup import LookupBase +from ansible.errors import AnsibleError import boto.ec2 -class LookupModule(object): +class LookupModule(LookupBase): def __init__(self, basedir=None, 
**kwargs): self.basedir = basedir - def run(self, region, inject=None, **kwargs): + def run(self, regions, variables, **kwargs): try: - conn = boto.ec2.connect_to_region(region) - zones = [z.name for z in conn.get_all_zones()] + zones = [] + for region in regions: + conn = boto.ec2.connect_to_region(region) + zones += [z.name for z in conn.get_all_zones()] return zones - except e: - raise errors.AnsibleError("Could not lookup zones for region: %s\nexception: %s" % (region, e)) - + except Exception as e: + raise AnsibleError("Could not lookup zones for region: %s\nexception: %s" % (region, e)) diff --git a/playbooks/provisioning/aws/cloudformation/nested_setup.yml b/playbooks/provisioning/aws/cloudformation/nested_setup.yml new file mode 100644 index 000000000..c58a849c3 --- /dev/null +++ b/playbooks/provisioning/aws/cloudformation/nested_setup.yml @@ -0,0 +1,45 @@ +# vim: set ft=ansible: +--- +- name: 'Bootstrapping or Refreshing Environment' + hosts: localhost + connection: local + become: no + gather_facts: no + vars: + openshift_provisioning_cluster_id: test + openshift_provisioning_env_id: test + openshift_provisioning_aws_keyname: default + openshift_provisioning_aws_region: us-east-1 + openshift_provisioning_aws_subnet_azs: "{{ lookup('ec2_zones_by_region', openshift_provisioning_aws_region) }}" + openshift_provisioning_aws_stack_name: openshift-{{ openshift_provisioning_cluster_id }}-{{ openshift_provisioning_env_id }} + openshift_provisioning_aws_num_subnets: "{{ openshift_provisioning_aws_subnet_azs.split(',') | length }}" + tasks: + - name: Launch the CloudFormation Template + cloudformation: + region: "{{ openshift_provisioning_aws_region }}" + stack_name: "{{ openshift_provisioning_aws_stack_name }}" + state: present + tags: + environment: "{{ openshift_provisioning_env_id }}" + clusterid: "{{ openshift_provisioning_cluster_id }}" + template: files/nested/three_master_infra_asg_node_asg_no_bastion.yaml + template_parameters: + NamePrefix: "{{ 
openshift_provisioning_aws_stack_name }}" + NumSubnets: "{{ openshift_provisioning_aws_num_subnets }}" + SubnetAvailabilityZones: "{{ openshift_provisioning_aws_subnet_azs }}" + MasterSecurityGroups: "{{ openshift_provisioning_aws_master_security_groups | default('') }}" + NodeSecurityGroups: "{{ openshift_provisioning_aws_node_security_groups | default('') }}" + EtcdSecurityGroups: "{{ openshift_provisioning_aws_etcd_security_groups | default('') }}" + RouterSecurityGroups: "{{ openshift_provisioning_aws_router_security_groups | default('') }}" + RouterElbSecurityGroups: "{{ openshift_provisioning_aws_router_elb_security_groups | default('') }}" + MasterExtElbSecurityGroups: "{{ openshift_provisioning_aws_master_ext_elb_security_groups | default('') }}" + MasterIntElbSecurityGroups: "{{ openshift_provisioning_aws_master_int_elb_security_groups | default('') }}" + MasterInstanceProfile: "{{ openshift_provisioning_aws_master_instance_profile | default('') }}" + NodeInstanceProfile: "{{ openshift_provisioning_aws_node_instance_profile | default('') }}" + MasterInstanceSubnets: "{{ openshift_provisioning_aws_master_instance_subnets | default('') }}" + VpcId: "{{ openshift_provisioning_aws_vpc_id | default('') }}" + KeyName: "{{ openshift_provisioning_aws_keyname }}" + MasterUserData: "{{ lookup('file', 'nested/master-user-data.yaml') | b64encode }}" + register: cf_output + + - debug: var=cf_output From 517776f16387c1d83e82f045d7aaf4c5936c1f03 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 21 Nov 2016 16:02:27 -0500 Subject: [PATCH 2/2] update ec2_zones_by_region lookup plugin --- .../lookup_plugins/ec2_zones_by_region.py | 40 +++++++++++++++++-- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/playbooks/provisioning/aws/cloudformation/lookup_plugins/ec2_zones_by_region.py b/playbooks/provisioning/aws/cloudformation/lookup_plugins/ec2_zones_by_region.py index 9987a3c3d..38dff39b3 100644 --- 
a/playbooks/provisioning/aws/cloudformation/lookup_plugins/ec2_zones_by_region.py +++ b/playbooks/provisioning/aws/cloudformation/lookup_plugins/ec2_zones_by_region.py @@ -11,11 +11,43 @@ def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, regions, variables, **kwargs): + if isinstance(region, list): + region = region[0] + + if not isinstance(region, basestring): + raise AnsibleError("type of region is: %s region is: %s" % + (type(region), region)) + try: - zones = [] - for region in regions: - conn = boto.ec2.connect_to_region(region) - zones += [z.name for z in conn.get_all_zones()] + conn = boto.ec2.connect_to_region(region) + if conn is None: + raise AnsibleError("Could not connect to region %s" % region) + + zones = [z.name for z in conn.get_all_zones()] + if "us-east-1b" in zones: zones.remove("us-east-1b"); + return zones + except Exception as e: + raise AnsibleError("Could not lookup zones for region: %s\nexception: %s" % (region, e)) + + try: + conn = boto.ec2.connect_to_region(region) + if conn is None: + raise AnsibleError("Could not connect to region %s" % region) + zones = [z.name for z in conn.get_all_zones()] + vpc_conn = boto.vpc.connect_to_region(region) + vpcs = vpc_conn.get_all_vpcs() + default_vpcs = [ v for v in vpcs if v.is_default ] + + # If there are vpc subnets available, then gather list of zones + # from zones with subnets. This prevents returning zones that + # are not vpc enabled. If the account is an ec2 Classic account + # without any VPC subnets, this could result in returning zones + # that are not vpc-enabled. + subnets = vpc_conn.get_all_subnets() + if len(subnets) > 0: + subnet_zones = list(set([s.availability_zone for s in subnets])) + return subnet_zones + return zones + except Exception as e: + raise AnsibleError("Could not lookup zones for region: %s\nexception: %s" % (region, e))