diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py
index 8d34837dafb1..12c9592a969e 100644
--- a/salt/cloud/__init__.py
+++ b/salt/cloud/__init__.py
@@ -76,11 +76,6 @@ def _call(queue, args, kwargs):
             queue.put('ERROR')
             queue.put('Exception')
             queue.put('{0}\n{1}\n'.format(ex, trace))
-        except SystemExit as ex:
-            trace = traceback.format_exc()
-            queue.put('ERROR')
-            queue.put('System exit')
-            queue.put('{0}\n{1}\n'.format(ex, trace))
         return ret
     return _call
@@ -189,10 +184,8 @@ class CloudClient(object):
     def __init__(self, path=None, opts=None, config_dir=None, pillars=None):
         if opts:
             self.opts = opts
-        elif path:
-            self.opts = salt.config.cloud_config(path)
         else:
-            self.opts = salt.config.cloud_config()
+            self.opts = salt.config.cloud_config(path)
 
         # Check the cache-dir exists. If not, create it.
         v_dirs = [self.opts['cachedir']]
@@ -524,7 +517,6 @@ class Cloud(object):
    '''
    def __init__(self, opts):
        self.opts = opts
-        self.client = CloudClient(opts=self.opts)
        self.clouds = salt.loader.clouds(self.opts)
        self.__filter_non_working_providers()
        self.__cached_provider_queries = {}
@@ -2058,10 +2050,6 @@ def run_map(self, dmap):
         # Now sort the create list based on dependencies
         create_list = sorted(six.iteritems(dmap['create']),
                              key=lambda x: x[1]['level'])
-        full_map = dmap['create'].copy()
-        if 'existing' in dmap:
-            full_map.update(dmap['existing'])
-        possible_master_list = sorted(six.iteritems(full_map), key=lambda x: x[1]['level'])
         output = {}
         if self.opts['parallel']:
             parallel_data = []
@@ -2069,127 +2057,117 @@ def run_map(self, dmap):
         master_minion_name = None
         master_host = None
         master_finger = None
-        for name, profile in possible_master_list:
-            if profile.get('make_master', False) is True:
-                master_name = name
-                master_profile = profile
-
-        if master_name:
-            # If the master already exists, get the host
-            if master_name not in dmap['create']:
-                master_host = self.client.query()
-                for provider_part in master_profile['provider'].split(':'):
-                    master_host = master_host[provider_part]
-                master_host = master_host[master_name][master_profile.get('ssh_interface', 'public_ips')]
-                if not master_host:
-                    raise SaltCloudSystemExit(
-                        'Could not get the hostname of master {}.'.format(master_name)
-                    )
-            # Otherwise, deploy it as a new master
-            else:
-                master_minion_name = master_name
-                log.debug('Creating new master \'%s\'', master_name)
-                if salt.config.get_cloud_config_value(
-                    'deploy',
+        try:
+            master_name, master_profile = next((
+                (name, profile) for name, profile in create_list
+                if profile.get('make_master', False) is True
+            ))
+            master_minion_name = master_name
+            log.debug('Creating new master \'%s\'', master_name)
+            if salt.config.get_cloud_config_value(
+                'deploy',
+                master_profile,
+                self.opts
+            ) is False:
+                raise SaltCloudSystemExit(
+                    'Cannot proceed with \'make_master\' when salt deployment '
+                    'is disabled(ex: --no-deploy).'
+                )
+
+            # Generate the master keys
+            log.debug('Generating master keys for \'%s\'', master_profile['name'])
+            priv, pub = salt.utils.cloud.gen_keys(
+                salt.config.get_cloud_config_value(
+                    'keysize',
                     master_profile,
                     self.opts
-                ) is False:
-                    raise SaltCloudSystemExit(
-                        'Cannot proceed with \'make_master\' when salt deployment '
-                        'is disabled(ex: --no-deploy).'
-                    )
+                )
+            )
+            master_profile['master_pub'] = pub
+            master_profile['master_pem'] = priv
 
-                # Generate the master keys
-                log.debug('Generating master keys for \'%s\'', master_profile['name'])
+            # Generate the fingerprint of the master pubkey in order to
+            # mitigate man-in-the-middle attacks
+            master_temp_pub = salt.utils.files.mkstemp()
+            with salt.utils.files.fopen(master_temp_pub, 'w') as mtp:
+                mtp.write(pub)
+            master_finger = salt.utils.crypt.pem_finger(master_temp_pub, sum_type=self.opts['hash_type'])
+            os.unlink(master_temp_pub)
+
+            if master_profile.get('make_minion', True) is True:
+                master_profile.setdefault('minion', {})
+                if 'id' in master_profile['minion']:
+                    master_minion_name = master_profile['minion']['id']
+                # Set this minion's master as local if the user has not set it
+                if 'master' not in master_profile['minion']:
+                    master_profile['minion']['master'] = '127.0.0.1'
+                    if master_finger is not None:
+                        master_profile['master_finger'] = master_finger
+
+            # Generate the minion keys to pre-seed the master:
+            for name, profile in create_list:
+                make_minion = salt.config.get_cloud_config_value(
+                    'make_minion', profile, self.opts, default=True
+                )
+                if make_minion is False:
+                    continue
+
+                log.debug('Generating minion keys for \'%s\'', profile['name'])
                 priv, pub = salt.utils.cloud.gen_keys(
                     salt.config.get_cloud_config_value(
                         'keysize',
-                        master_profile,
+                        profile,
                         self.opts
                     )
                 )
-                master_profile['master_pub'] = pub
-                master_profile['master_pem'] = priv
-
-                # Generate the fingerprint of the master pubkey in order to
-                # mitigate man-in-the-middle attacks
-                master_temp_pub = salt.utils.files.mkstemp()
-                with salt.utils.files.fopen(master_temp_pub, 'w') as mtp:
-                    mtp.write(pub)
-                master_finger = salt.utils.crypt.pem_finger(master_temp_pub, sum_type=self.opts['hash_type'])
-                os.unlink(master_temp_pub)
-
-                if master_profile.get('make_minion', True) is True:
-                    master_profile.setdefault('minion', {})
-                    if 'id' in master_profile['minion']:
-                        master_minion_name = master_profile['minion']['id']
-                    # Set this minion's master as local if the user has not set it
-                    if 'master' not in master_profile['minion']:
-                        master_profile['minion']['master'] = '127.0.0.1'
-                    if master_finger is not None:
-                        master_profile['master_finger'] = master_finger
-
-            # Generate the minion keys to pre-seed the master:
-            for name, profile in create_list:
-                make_minion = salt.config.get_cloud_config_value(
-                    'make_minion', profile, self.opts, default=True
-                )
-                if make_minion is False:
-                    continue
+                profile['pub_key'] = pub
+                profile['priv_key'] = priv
+                # Store the minion's public key in order to be pre-seeded in
+                # the master
+                master_profile.setdefault('preseed_minion_keys', {})
+                master_profile['preseed_minion_keys'].update({name: pub})
+
+            local_master = False
+            if master_profile['minion'].get('local_master', False) and \
+                    master_profile['minion'].get('master', None) is not None:
+                # The minion is explicitly defining a master and it's
+                # explicitly saying it's the local one
+                local_master = True
 
-                log.debug('Generating minion keys for \'%s\'', profile['name'])
-                priv, pub = salt.utils.cloud.gen_keys(
-                    salt.config.get_cloud_config_value(
-                        'keysize',
-                        profile,
-                        self.opts
-                    )
-                )
-                profile['pub_key'] = pub
-                profile['priv_key'] = priv
-                # Store the minion's public key in order to be pre-seeded in
-                # the master
-                master_profile.setdefault('preseed_minion_keys', {})
-                master_profile['preseed_minion_keys'].update({name: pub})
-
-            local_master = False
-            if master_profile['minion'].get('local_master', False) and \
-                    master_profile['minion'].get('master', None) is not None:
-                # The minion is explicitly defining a master and it's
-                # explicitly saying it's the local one
-                local_master = True
-
-            out = self.create(master_profile, local_master=local_master)
-
-            if not isinstance(out, dict):
-                log.debug('Master creation details is not a dictionary: %s', out)
-
-            elif 'Errors' in out:
-                raise SaltCloudSystemExit(
-                    'An error occurred while creating the master, not '
-                    'continuing: {0}'.format(out['Errors'])
+            out = self.create(master_profile, local_master=local_master)
+
+            if not isinstance(out, dict):
+                log.debug(
+                    'Master creation details is not a dictionary: {0}'.format(
+                        out
                     )
+                )
 
-            deploy_kwargs = (
-                self.opts.get('show_deploy_args', False) is True and
-                # Get the needed data
-                out.get('deploy_kwargs', {}) or
-                # Strip the deploy_kwargs from the returned data since we don't
-                # want it shown in the console.
-                out.pop('deploy_kwargs', {})
+            elif 'Errors' in out:
+                raise SaltCloudSystemExit(
+                    'An error occurred while creating the master, not '
+                    'continuing: {0}'.format(out['Errors'])
                 )
 
-            master_host = deploy_kwargs.get('salt_host', deploy_kwargs.get('host', None))
-            if master_host is None:
-                raise SaltCloudSystemExit(
-                    'Host for new master {0} was not found, '
-                    'aborting map'.format(
-                        master_name
-                    )
+            deploy_kwargs = (
+                self.opts.get('show_deploy_args', False) is True and
+                # Get the needed data
+                out.get('deploy_kwargs', {}) or
+                # Strip the deploy_kwargs from the returned data since we don't
+                # want it shown in the console.
+                out.pop('deploy_kwargs', {})
+            )
+
+            master_host = deploy_kwargs.get('salt_host', deploy_kwargs.get('host', None))
+            if master_host is None:
+                raise SaltCloudSystemExit(
+                    'Host for new master {0} was not found, '
+                    'aborting map'.format(
+                        master_name
                     )
-            output[master_name] = out
-        else:
+                )
+            output[master_name] = out
+        except StopIteration:
             log.debug('No make_master found in map')
             # Local master?
             # Generate the fingerprint of the master pubkey in order to
diff --git a/salt/cloud/cli.py b/salt/cloud/cli.py
index a13035f4dd7e..385a73b0edd1 100644
--- a/salt/cloud/cli.py
+++ b/salt/cloud/cli.py
@@ -28,7 +28,7 @@
 import salt.utils.parsers
 import salt.utils.user
 from salt.exceptions import SaltCloudException, SaltCloudSystemExit
-from salt.utils.verify import check_user, verify_env, verify_files, verify_log
+from salt.utils.verify import check_user, verify_env, verify_log_files, verify_log
 
 # Import 3rd-party libs
 from salt.ext import six
@@ -70,11 +70,9 @@ def run(self):
                     root_dir=self.config['root_dir'],
                 )
                 logfile = self.config['log_file']
-                if logfile is not None and not logfile.startswith('tcp://') \
-                        and not logfile.startswith('udp://') \
-                        and not logfile.startswith('file://'):
+                if logfile is not None:
                     # Logfile is not using Syslog, verify
-                    verify_files([logfile], salt_master_user)
+                    verify_log_files([logfile], salt_master_user)
             except (IOError, OSError) as err:
                 log.error('Error while verifying the environment: %s', err)
                 sys.exit(err.errno)
diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py
index 32cf9bf5133e..a422feca4fc9 100644
--- a/salt/cloud/clouds/azurearm.py
+++ b/salt/cloud/clouds/azurearm.py
@@ -34,9 +34,6 @@
       * ``client_id``
       * ``secret``
 
-    if using MSI-style authentication:
-      * ``subscription_id``
-
     Optional provider parameters:
 
    **cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
@@ -254,22 +251,14 @@ def __is_provider_configured(opts, provider, required_keys=()):
     provider = __is_provider_configured(
         __opts__,
         __active_provider_name__ or __virtualname__,
-        ('subscription_id', 'tenant', 'client_id', 'secret'),
+        ('subscription_id', 'tenant', 'client_id', 'secret')
     )
 
     if provider is False:
         provider = __is_provider_configured(
             __opts__,
             __active_provider_name__ or __virtualname__,
-            ('subscription_id', 'username', 'password'),
-        )
-
-    if provider is False:
-        # check if using MSI style credentials...
-        provider = config.is_provider_configured(
-            __opts__,
-            __active_provider_name__ or __virtualname__,
-            required_keys=('subscription_id',),
+            ('subscription_id', 'username', 'password')
         )
 
     return provider
@@ -322,13 +311,11 @@ def get_conn(client_type):
         )
 
         conn_kwargs.update({'client_id': client_id, 'secret': secret,
                             'tenant': tenant})
-
-    username = config.get_cloud_config_value(
-        'username',
-        get_configured_provider(), __opts__, search_global=False
-    )
-
-    if username is not None:
+    else:
+        username = config.get_cloud_config_value(
+            'username',
+            get_configured_provider(), __opts__, search_global=False
+        )
         password = config.get_cloud_config_value(
             'password',
             get_configured_provider(), __opts__, search_global=False
@@ -1048,15 +1035,6 @@ def request_instance(vm_):
         default=False
     )
 
-    vm_password = salt.utils.stringutils.to_str(
-        config.get_cloud_config_value(
-            'ssh_password', vm_, __opts__, search_global=True,
-            default=config.get_cloud_config_value(
-                'win_password', vm_, __opts__, search_global=True
-            )
-        )
-    )
-
     os_kwargs = {}
     win_installer = config.get_cloud_config_value(
         'win_installer', vm_, __opts__, search_global=True
@@ -1074,6 +1052,16 @@ def request_instance(vm_):
             ssh=sshconfiguration,
         )
         os_kwargs['linux_configuration'] = linuxconfiguration
+        vm_password = None
+    else:
+        vm_password = salt.utils.stringutils.to_str(
+            config.get_cloud_config_value(
+                'ssh_password', vm_, __opts__, search_global=True,
+                default=config.get_cloud_config_value(
+                    'win_password', vm_, __opts__, search_global=True
+                )
+            )
+        )
 
     if win_installer or (vm_password is not None and not disable_password_authentication):
         if not isinstance(vm_password, str):
@@ -1340,7 +1328,7 @@ def request_instance(vm_):
             ),
             network_profile=NetworkProfile(
                 network_interfaces=[
-                    NetworkInterfaceReference(vm_['iface_id']),
+                    NetworkInterfaceReference(id=vm_['iface_id']),
                 ],
             ),
             availability_set=availability_set,
diff --git a/salt/cloud/clouds/ec2.py b/salt/cloud/clouds/ec2.py
index 6b1f339f791e..e9ce08cd8bff 100644
--- a/salt/cloud/clouds/ec2.py
+++ b/salt/cloud/clouds/ec2.py
@@ -71,16 +71,13 @@
     # Pass userdata to the instance to be created
     userdata_file: /etc/salt/my-userdata-file
 
-    # Instance termination protection setting
-    # Default is disabled
-    termination_protection: False
-
 :depends: requests
 '''
 # pylint: disable=invalid-name,function-redefined
 
 # Import python libs
 from __future__ import absolute_import, print_function, unicode_literals
+from functools import cmp_to_key
 import os
 import sys
 import stat
@@ -95,6 +92,7 @@
 import binascii
 import datetime
 import base64
+import msgpack
 import re
 import decimal
 
@@ -104,7 +102,6 @@
 import salt.utils.files
 import salt.utils.hashutils
 import salt.utils.json
-import salt.utils.msgpack
 import salt.utils.stringutils
 import salt.utils.yaml
 from salt._compat import ElementTree as ET
@@ -1230,7 +1227,7 @@ def get_imageid(vm_):
     _t = lambda x: datetime.datetime.strptime(x['creationDate'], '%Y-%m-%dT%H:%M:%S.%fZ')
     image_id = sorted(aws.query(params, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4'),
-                      lambda i, j: salt.utils.compat.cmp(_t(i), _t(j))
+                      key=cmp_to_key(lambda i, j: salt.utils.compat.cmp(_t(i), _t(j)))
                       )[-1]['imageId']
     get_imageid.images[image] = image_id
     return image_id
@@ -1935,18 +1932,6 @@ def request_instance(vm_=None, call=None):
         'del_root_vol_on_destroy', vm_, __opts__, search_global=False
     )
 
-    set_termination_protection = config.get_cloud_config_value(
-        'termination_protection', vm_, __opts__, search_global=False
-    )
-
-    if set_termination_protection is not None:
-        if not isinstance(set_termination_protection, bool):
-            raise SaltCloudConfigError(
-                '\'termination_protection\' should be a boolean value.'
-            )
-        params.update(_param_from_config(spot_prefix + 'DisableApiTermination',
-                                         set_termination_protection))
-
     if set_del_root_vol_on_destroy and not isinstance(set_del_root_vol_on_destroy, bool):
         raise SaltCloudConfigError(
             '\'del_root_vol_on_destroy\' should be a boolean value.'
@@ -4482,14 +4467,11 @@ def create_keypair(kwargs=None, call=None):
 
     data = aws.query(params,
                      return_url=True,
-                     return_root=True,
                      location=get_location(),
                      provider=get_provider(),
                      opts=__opts__,
                     sigver='4')
-    keys = [x for x in data[0] if 'requestId' not in x]
-
-    return (keys, data[1])
+    return data
 
 
 def import_keypair(kwargs=None, call=None):
@@ -5018,7 +5000,7 @@ def _parse_pricing(url, name):
         __opts__['cachedir'], 'ec2-pricing-{0}.p'.format(name)
     )
     with salt.utils.files.fopen(outfile, 'w') as fho:
-        salt.utils.msgpack.dump(regions, fho)
+        msgpack.dump(regions, fho)
 
     return True
 
@@ -5086,8 +5068,7 @@ def show_pricing(kwargs=None, call=None):
         update_pricing({'type': name}, 'function')
 
     with salt.utils.files.fopen(pricefile, 'r') as fhi:
-        ec2_price = salt.utils.stringutils.to_unicode(
-            salt.utils.msgpack.load(fhi))
+        ec2_price = salt.utils.stringutils.to_unicode(msgpack.load(fhi))
 
     region = get_location(profile)
     size = profile.get('size', None)
diff --git a/salt/cloud/clouds/gce.py b/salt/cloud/clouds/gce.py
index 7fb6a55dbcf0..8466ac20ad1c 100644
--- a/salt/cloud/clouds/gce.py
+++ b/salt/cloud/clouds/gce.py
@@ -53,6 +53,7 @@
 import re
 import pprint
 import logging
+import msgpack
 from ast import literal_eval
 
 from salt.utils.versions import LooseVersion as _LooseVersion
@@ -90,7 +91,6 @@
 import salt.utils.cloud
 import salt.utils.files
 import salt.utils.http
-import salt.utils.msgpack
 import salt.config as config
 from salt.cloud.libcloudfuncs import *  # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
 from salt.exceptions import (
@@ -255,12 +255,6 @@ def _expand_address(addy):
     return ret
 
 
-def _expand_region(region):
-    ret = {}
-    ret['name'] = region.name
-    return ret
-
-
 def _expand_balancer(lb):
     '''
     Convert the libcloud load-balancer object into something more serializable.
@@ -1257,7 +1251,6 @@ def create_address(kwargs=None, call=None):
     name = kwargs['name']
     ex_region = kwargs['region']
     ex_address = kwargs.get("address", None)
-    kwargs['region'] = _expand_region(kwargs['region'])
 
     conn = get_conn()
 
@@ -1265,7 +1258,7 @@
         'event',
         'create address',
         'salt/cloud/address/creating',
-        args=salt.utils.data.simple_types_filter(kwargs),
+        args=kwargs,
         sock_dir=__opts__['sock_dir'],
         transport=__opts__['transport']
     )
@@ -1276,7 +1269,7 @@
         'event',
         'created address',
         'salt/cloud/address/created',
-        args=salt.utils.data.simple_types_filter(kwargs),
+        args=kwargs,
         sock_dir=__opts__['sock_dir'],
         transport=__opts__['transport']
     )
@@ -2485,19 +2478,13 @@ def request_instance(vm_):
     if external_ip.lower() == 'ephemeral':
         external_ip = 'ephemeral'
-        vm_['external_ip'] = external_ip
     elif external_ip == 'None':
         external_ip = None
-        vm_['external_ip'] = external_ip
     else:
         region = __get_region(conn, vm_)
         external_ip = __create_orget_address(conn, external_ip, region)
-        vm_['external_ip'] = {
-            'name': external_ip.name,
-            'address': external_ip.address,
-            'region': external_ip.region.name
-        }
     kwargs['external_ip'] = external_ip
+    vm_['external_ip'] = external_ip
 
     if LIBCLOUD_VERSION_INFO > (0, 15, 1):
@@ -2521,27 +2508,6 @@ def request_instance(vm_):
                 '\'pd-standard\', \'pd-ssd\''
             )
 
-    # GCE accelerator options are only supported as of libcloud >= 2.3.0
-    # and Python 3+ is required so that libcloud will detect a type of
-    # 'string' rather than 'unicode'
-    if LIBCLOUD_VERSION_INFO >= (2, 3, 0) and isinstance(u'test', str):
-
-        kwargs.update({
-            'ex_accelerator_type': config.get_cloud_config_value(
-                'ex_accelerator_type', vm_, __opts__, default=None),
-            'ex_accelerator_count': config.get_cloud_config_value(
-                'ex_accelerator_count', vm_, __opts__, default=None)
-        })
-        if kwargs.get('ex_accelerator_type'):
-            log.warning(
-                'An accelerator is being attached to this instance, '
-                'the ex_on_host_maintenance setting is being set to '
-                '\'TERMINATE\' as a result'
-            )
-            kwargs.update({
-                'ex_on_host_maintenance': 'TERMINATE'
-            })
-
     log.info(
         'Creating GCE instance %s in %s',
         vm_['name'], kwargs['location'].name
@@ -2663,7 +2629,7 @@ def update_pricing(kwargs=None, call=None):
         __opts__['cachedir'], 'gce-pricing.p'
     )
     with salt.utils.files.fopen(outfile, 'w') as fho:
-        salt.utils.msgpack.dump(price_json['dict'], fho)
+        msgpack.dump(price_json['dict'], fho)
 
     return True
 
@@ -2702,7 +2668,7 @@ def show_pricing(kwargs=None, call=None):
         update_pricing()
 
     with salt.utils.files.fopen(pricefile, 'r') as fho:
-        sizes = salt.utils.msgpack.load(fho)
+        sizes = msgpack.load(fho)
 
     per_hour = float(sizes['gcp_price_list'][size][region])
diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py
index 55c1b2db9017..4192591a9086 100644
--- a/salt/cloud/clouds/libvirt.py
+++ b/salt/cloud/clouds/libvirt.py
@@ -58,9 +58,8 @@
 from __future__ import absolute_import, print_function, unicode_literals
 
 import logging
-import os
-import sys
 import uuid
+import os
 
 from xml.etree import ElementTree
 
@@ -466,10 +465,10 @@ def create(vm_):
             )
 
         return ret
-    except Exception:  # pylint: disable=broad-except
+    except Exception as e:  # pylint: disable=broad-except
         do_cleanup(cleanup)
         # throw the root cause after cleanup
-        six.reraise(*sys.exc_info())
+        raise e
 
 
 def do_cleanup(cleanup):
diff --git a/salt/cloud/clouds/oneandone.py b/salt/cloud/clouds/oneandone.py
index eae4e3e3d5f9..2126cc839da4 100644
--- a/salt/cloud/clouds/oneandone.py
+++ b/salt/cloud/clouds/oneandone.py
@@ -68,10 +68,6 @@
       load_balancer_id:
       # Monitoring policy ID
       monitoring_policy_id:
-      # Baremetal model ID
-      baremetal_model_id:
-      # Server type
-      server_type: - default cloud
 
 Set ``deploy`` to False if Salt should not be installed on the node.
@@ -93,25 +89,6 @@
     sudo salt-cloud -f create_block_storage my-oneandone-config name='SaltTest2'
     description='SaltTestDescription' size=50 datacenter_id='5091F6D8CBFEF9C26ACE957C652D5D49'
 
-Create a firewall policy
-
-.. code-block:: bash
-
-    sudo salt-cloud -f create_firewall_policy oneandone name='1salttest'
-    description='salt_test_desc' rules='[{"protocol":"TCP", "port":"80", "description":"salt_fw_rule_desc"}]'
-
-List baremetal models
-
-.. code-block:: bash
-
-    sudo salt-cloud -f baremetal_models oneandone
-
-List baremetal images
-
-.. code-block:: bash
-
-    sudo salt-cloud -f avail_baremetal_images oneandone
-
 '''
 
 # Import python libs
@@ -120,7 +97,6 @@
 import os
 import pprint
 import time
-import json
 
 # Import salt libs
 import salt.config as config
@@ -140,7 +116,7 @@
 try:
     from oneandone.client import (
-        OneAndOneService, FirewallPolicy, FirewallPolicyRule, Server, Hdd, SshKey, BlockStorage
+        OneAndOneService, Server, Hdd, BlockStorage, SshKey
     )
     HAS_ONEANDONE = True
 except ImportError:
@@ -269,8 +245,8 @@ def create_block_storage(kwargs=None, call=None):
     '''
     if call == 'action':
         raise SaltCloudSystemExit(
-            'The create_block_storage function must be called with '
-            '-f or --function'
+            'The avail_locations function must be called with '
+            '-f or --function, or with the --list-locations option'
         )
 
     conn = get_conn()
@@ -333,8 +309,8 @@ def create_ssh_key(kwargs=None, call=None):
     '''
     if call == 'action':
         raise SaltCloudSystemExit(
-            'The create_ssh_key function must be called with '
-            '-f or --function'
+            'The avail_locations function must be called with '
+            '-f or --function, or with the --list-locations option'
        )
 
     conn = get_conn()
@@ -347,64 +323,6 @@ def create_ssh_key(kwargs=None, call=None):
     return {'SshKey': data}
 
 
-def _get_firewall_policy(kwargs):
-    '''
-    Construct FirewallPolicy and FirewallPolicy instances from passed arguments
-    '''
-    fp_name = kwargs.get('name', None)
-    fp_description = kwargs.get('description', None)
-    firewallPolicy = FirewallPolicy(
-        name=fp_name,
-        description=fp_description
-    )
-
-    fpr_json = kwargs.get('rules', None)
-    jdata = json.loads(fpr_json)
-    rules = []
-    for fwpr in jdata:
-        firewallPolicyRule = FirewallPolicyRule()
-        if 'protocol' in fwpr:
-            firewallPolicyRule.rule_set['protocol'] = fwpr['protocol']
-        if 'port_from' in fwpr:
-            firewallPolicyRule.rule_set['port_from'] = fwpr['port_from']
-        if 'port_to' in fwpr:
-            firewallPolicyRule.rule_set['port_to'] = fwpr['port_to']
-        if 'source' in fwpr:
-            firewallPolicyRule.rule_set['source'] = fwpr['source']
-        if 'action' in fwpr:
-            firewallPolicyRule.rule_set['action'] = fwpr['action']
-        if 'description' in fwpr:
-            firewallPolicyRule.rule_set['description'] = fwpr['description']
-        if 'port' in fwpr:
-            firewallPolicyRule.rule_set['port'] = fwpr['port']
-        rules.append(firewallPolicyRule)
-
-    return {'firewall_policy': firewallPolicy, 'firewall_policy_rules': rules}
-
-
-def create_firewall_policy(kwargs=None, call=None):
-    '''
-    Create a firewall policy
-    '''
-    if call == 'action':
-        raise SaltCloudSystemExit(
-            'The create_firewall_policy function must be called with '
-            '-f or --function'
-        )
-
-    conn = get_conn()
-
-    # Assemble the composite FirewallPolicy and FirewallPolicyRule[] objects.
-    getFwpResult = _get_firewall_policy(kwargs)
-
-    data = conn.create_firewall_policy(
-        firewall_policy=getFwpResult['firewall_policy'],
-        firewall_policy_rules=getFwpResult['firewall_policy_rules']
-    )
-
-    return {'FirewallPolicy': data}
-
-
 def avail_images(conn=None, call=None):
     '''
     Return a list of the server appliances that are on the provider
@@ -426,27 +344,6 @@ def avail_images(conn=None, call=None):
     return ret
 
 
-def avail_baremetal_images(conn=None, call=None):
-    '''
-    Return a list of the baremetal server appliances that are on the provider
-    '''
-    if call == 'action':
-        raise SaltCloudSystemExit(
-            'The avail_baremetal_images function must be called with '
-            '-f or --function'
-        )
-
-    if not conn:
-        conn = get_conn()
-
-    ret = {}
-
-    for appliance in conn.list_appliances(q='BAREMETAL'):
-        ret[appliance['name']] = appliance
-
-    return ret
-
-
 def avail_sizes(call=None):
     '''
     Return a dict of all available VM sizes on the cloud provider with
@@ -465,23 +362,6 @@
     return sizes
 
 
-def baremetal_models(call=None):
-    '''
-    Return a dict of all available baremetal models with relevant data.
-    '''
-    if call == 'action':
-        raise SaltCloudSystemExit(
-            'The baremetal_models function must be called with '
-            '-f or --function'
-        )
-
-    conn = get_conn()
-
-    bmodels = conn.list_baremetal_models()
-
-    return bmodels
-
-
 def script(vm_):
     '''
     Return the script deployment object
@@ -604,20 +484,16 @@ def _get_server(vm_):
     ssh_key = load_public_key(vm_)
 
-    server_type = config.get_cloud_config_value(
-        'server_type', vm_, __opts__, default='cloud',
-        search_global=False
-    )
     vcore = None
     cores_per_processor = None
     ram = None
     fixed_instance_size_id = None
-    baremetal_model_id = None
 
     if 'fixed_instance_size' in vm_:
         fixed_instance_size = get_size(vm_)
         fixed_instance_size_id = fixed_instance_size['id']
-    elif 'vm_core' in vm_ and 'cores_per_processor' in vm_ and 'ram' in vm_ and 'hdds' in vm_:
+    elif (vm_['vcore'] and vm_['cores_per_processor'] and
+          vm_['ram'] and vm_['hdds']):
         vcore = config.get_cloud_config_value(
             'vcore', vm_, __opts__, default=None,
             search_global=False
@@ -630,16 +506,9 @@ def _get_server(vm_):
             'ram', vm_, __opts__, default=None,
             search_global=False
         )
-    elif 'baremetal_model_id' in vm_ and server_type == 'baremetal':
-        baremetal_model_id = config.get_cloud_config_value(
-            'baremetal_model_id', vm_, __opts__, default=None,
-            search_global=False
-        )
     else:
-        raise SaltCloudConfigError("'fixed_instance_size' or 'vcore', "
-                                   "'cores_per_processor', 'ram', and 'hdds' "
-                                   "must be provided for 'cloud' server. "
-                                   "For 'baremetal' server, 'baremetal_model_id'"
+        raise SaltCloudConfigError("'fixed_instance_size' or 'vcore',"
+                                   "'cores_per_processor', 'ram', and 'hdds'"
                                    "must be provided.")
 
     appliance_id = config.get_cloud_config_value(
@@ -688,7 +557,7 @@ def _get_server(vm_):
     )
 
     public_key = config.get_cloud_config_value(
-        'public_key_ids', vm_, __opts__, default=None,
+        'public_key_ids', vm_, __opts__, default=True,
         search_global=False
     )
@@ -710,9 +579,7 @@ def _get_server(vm_):
         datacenter_id=datacenter_id,
         rsa_key=ssh_key,
         private_network_id=private_network_id,
-        public_key=public_key,
-        server_type=server_type,
-        baremetal_model_id=baremetal_model_id
+        public_key=public_key
     )
@@ -760,8 +627,7 @@ def create(vm_):
     # Assemble the composite server object.
     server = _get_server(vm_)
 
-    if not bool(server.specs['hardware']['fixed_instance_size_id'])\
-            and not bool(server.specs['server_type'] == 'baremetal'):
+    if not bool(server.specs['hardware']['fixed_instance_size_id']):
         # Assemble the hdds object.
         hdds = _get_hdds(vm_)
diff --git a/salt/cloud/clouds/opennebula.py b/salt/cloud/clouds/opennebula.py
index eb58e7b24199..566222e3be28 100644
--- a/salt/cloud/clouds/opennebula.py
+++ b/salt/cloud/clouds/opennebula.py
@@ -1142,6 +1142,7 @@ def __query_node_data(vm_name):
                 # if IPv6 is used try this as last resort
                 # OpenNebula does not yet show ULA address here so take global
                 private_ip = data['template']['nic']['ip6_global']
+    vm_['ssh_host'] = private_ip
 
     ssh_username = config.get_cloud_config_value(
diff --git a/salt/cloud/clouds/openstack.py b/salt/cloud/clouds/openstack.py
index 8faf7dd04795..30c950e66ddd 100644
--- a/salt/cloud/clouds/openstack.py
+++ b/salt/cloud/clouds/openstack.py
@@ -273,6 +273,12 @@ def get_dependencies():
     '''
     Warn if dependencies aren't met.
     '''
+    if not HAS_SHADE:
+        log.warning('"shade" not found')
+        return False
+    elif hasattr(HAS_SHADE, '__len__') and not HAS_SHADE[0]:
+        log.warning(HAS_SHADE[1])
+        return False
     deps = {
         'shade': shade[0],
         'os_client_config': shade[0],
diff --git a/salt/cloud/clouds/proxmox.py b/salt/cloud/clouds/proxmox.py
index bd3fa9e72bdb..cde89860826e 100644
--- a/salt/cloud/clouds/proxmox.py
+++ b/salt/cloud/clouds/proxmox.py
@@ -18,7 +18,6 @@
       user: myuser@pam or myuser@pve
       password: mypassword
       url: hypervisor.domain.tld
-      port: 8006
       driver: proxmox
       verify_ssl: True
 
@@ -106,7 +105,6 @@ def get_dependencies():
 
 
 url = None
-port = None
 ticket = None
 csrf = None
 verify_ssl = None
@@ -117,14 +115,10 @@ def _authenticate():
     '''
     Retrieve CSRF and API tickets for the Proxmox API
     '''
-    global url, port, ticket, csrf, verify_ssl
+    global url, ticket, csrf, verify_ssl
     url = config.get_cloud_config_value(
         'url', get_configured_provider(), __opts__, search_global=False
     )
-    port = config.get_cloud_config_value(
-        'port', get_configured_provider(), __opts__,
-        default=8006, search_global=False
-    )
     username = config.get_cloud_config_value(
         'user', get_configured_provider(), __opts__, search_global=False
     ),
@@ -137,7 +131,7 @@ def _authenticate():
     )
 
     connect_data = {'username': username, 'password': passwd}
-    full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port)
+    full_url = 'https://{0}:8006/api2/json/access/ticket'.format(url)
 
     returned_data = requests.post(
         full_url, verify=verify_ssl, data=connect_data).json()
@@ -154,7 +148,7 @@ def query(conn_type, option, post_data=None):
         log.debug('Not authenticated yet, doing that now..')
         _authenticate()
 
-    full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)
+    full_url = 'https://{0}:8006/api2/json/{1}'.format(url, option)
 
     log.debug('%s: %s (%s)', conn_type, full_url, post_data)
@@ -326,42 +320,22 @@ def get_resources_vms(call=None, resFilter=None, includeConfig=True):
 
         salt-cloud -f get_resources_vms my-proxmox-config
     '''
+    log.debug('Getting resource: vms.. (filter: %s)', resFilter)
+    resources = query('get', 'cluster/resources')
 
-    timeoutTime = time.time() + 60
-    while True:
-        log.debug('Getting resource: vms.. (filter: %s)', resFilter)
-        resources = query('get', 'cluster/resources')
-        ret = {}
-        badResource = False
-        for resource in resources:
-            if 'type' in resource and resource['type'] in ['openvz', 'qemu',
-                                                           'lxc']:
-                try:
-                    name = resource['name']
-                except KeyError:
-                    badResource = True
-                    log.debug('No name in VM resource %s', repr(resource))
-                    break
-
-                ret[name] = resource
-
-                if includeConfig:
-                    # Requested to include the detailed configuration of a VM
-                    ret[name]['config'] = get_vmconfig(
-                        ret[name]['vmid'],
-                        ret[name]['node'],
-                        ret[name]['type']
-                    )
-
-        if time.time() > timeoutTime:
-            raise SaltCloudExecutionTimeout('FAILED to get the proxmox '
-                                            'resources vms')
-
-        # Carry on if there wasn't a bad resource return from Proxmox
-        if not badResource:
-            break
-
-        time.sleep(0.5)
+    ret = {}
+    for resource in resources:
+        if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']:
+            name = resource['name']
+            ret[name] = resource
+
+            if includeConfig:
+                # Requested to include the detailed configuration of a VM
+                ret[name]['config'] = get_vmconfig(
+                    ret[name]['vmid'],
+                    ret[name]['node'],
+                    ret[name]['type']
+                )
 
     if resFilter is not None:
         log.debug('Filter given: %s, returning requested '
@@ -518,41 +492,6 @@ def list_nodes_select(call=None):
     )
 
 
-def _stringlist_to_dictionary(input_string):
-    '''
-    Convert a stringlist (comma separated settings) to a dictionary
-
-    The result of the string setting1=value1,setting2=value2 will be a python dictionary:
-
-    {'setting1':'value1','setting2':'value2'}
-    '''
-    li = str(input_string).split(',')
-    ret = {}
-    for item in li:
-        pair = str(item).replace(' ', '').split('=')
-        if len(pair) != 2:
-            log.warning('Cannot process stringlist item %s', item)
-            continue
-
-        ret[pair[0]] = pair[1]
-    return ret
-
-
-def _dictionary_to_stringlist(input_dict):
-    '''
-    Convert a dictionary to a stringlist (comma separated settings)
-
-    The result of the dictionary {'setting1':'value1','setting2':'value2'} will be:
-
-    setting1=value1,setting2=value2
-    '''
-    string_value = ""
-    for s in input_dict:
-        string_value += "{0}={1},".format(s, input_dict[s])
-    string_value = string_value[:-1]
-    return string_value
-
-
 def create(vm_):
     '''
     Create a single VM from a data dict
@@ -579,8 +518,7 @@ def create(vm_):
         'event',
         'starting create',
         'salt/cloud/{0}/creating'.format(vm_['name']),
-        args=__utils__['cloud.filter_event'](
-            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
+        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
         sock_dir=__opts__['sock_dir'],
         transport=__opts__['transport']
     )
@@ -621,172 +559,22 @@ def create(vm_):
     host = data['node']  # host which we have received
     nodeType = data['technology']  # VM tech (Qemu / OpenVZ)
 
-    if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0:
-        # Determine which IP to use in order of preference:
-        if 'ip_address' in vm_:
-            ip_address = six.text_type(vm_['ip_address'])
-        elif 'public_ips' in data:
-            ip_address = six.text_type(data['public_ips'][0])  # first IP
-        elif 'private_ips' in data:
-            ip_address = six.text_type(data['private_ips'][0])  # first IP
-        else:
-            raise SaltCloudExecutionFailure("Could not determine an IP address to use")
+    # Determine which IP to use in order of preference:
+    if 'ip_address' in vm_:
+        ip_address = six.text_type(vm_['ip_address'])
+    elif 'public_ips' in data:
+        ip_address = six.text_type(data['public_ips'][0])  # first IP
+    elif 'private_ips' in data:
+        ip_address = six.text_type(data['private_ips'][0])  # first IP
+    else:
+        raise SaltCloudExecutionFailure  # err.. not a good idea i reckon
+
+    log.debug('Using IP address %s', ip_address)
 
     # wait until the vm has been created so we can start it
     if not wait_for_created(data['upid'], timeout=300):
         return {'Error': 'Unable to create {0}, command timed out'.format(name)}
 
-    if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu':
-        # If we cloned a machine, see if we need to reconfigure any of the options such as net0,
-        # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's
-        # brought up
-
-        log.info('Configuring cloned VM')
-
-        # Modify the settings for the VM one at a time so we can see any problems with the values
-        # as quickly as possible
-        for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent':
-            if setting in vm_:  # if the property is set, use it for the VM request
-                postParams = {}
-                postParams[setting] = vm_[setting]
-                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)
-
-        # cloud-init settings
-        for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain':
-            if setting in vm_:  # if the property is set, use it for the VM request
-                postParams = {}
-                postParams[setting] = vm_[setting]
-                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)
-
-        for setting_number in range(3):
-            setting = 'ide{0}'.format(setting_number)
-            if setting in vm_:
-                postParams = {}
-                postParams[setting] = vm_[setting]
-                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)
-
-        for setting_number in range(5):
-            setting = 'sata{0}'.format(setting_number)
-            if setting in vm_:
-                vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
-                if setting in vm_config:
-                    setting_params = vm_[setting]
-                    setting_storage = setting_params.split(':')[0]
-                    setting_size = _stringlist_to_dictionary(setting_params)['size']
-                    vm_disk_params = vm_config[setting]
-                    vm_disk_storage = vm_disk_params.split(':')[0]
-                    vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size']
-                    # if storage is different, move the disk
-                    if setting_storage != vm_disk_storage:
-                        postParams = {}
-                        postParams['disk'] = setting
-                        postParams['storage'] = setting_storage
-                        postParams['delete'] = 1
-                        node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format(
-                            vm_['host'], vmid), postParams)
-                        data = _parse_proxmox_upid(node, vm_)
-                        # wait until the disk has been moved
-                        if not wait_for_task(data['upid'], timeout=300):
-                            return {'Error': 'Unable to move disk {0}, command timed out'.format(
-                                setting)}
-                    # if storage is different, move the disk
-                    if setting_size != vm_disk_size:
-                        postParams = {}
-                        postParams['disk'] = setting
-                        postParams['size'] = setting_size
-                        query('put', 'nodes/{0}/qemu/{1}/resize'.format(
-                            vm_['host'], vmid), postParams)
-                else:
-                    postParams = {}
-                    postParams[setting] = vm_[setting]
-                    query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)
-
-        for setting_number in range(13):
-            setting = 'scsi{0}'.format(setting_number)
-            if setting in vm_:
-                vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
-                if setting in vm_config:
-                    setting_params = vm_[setting]
-                    setting_storage = setting_params.split(':')[0]
-                    setting_size = _stringlist_to_dictionary(setting_params)['size']
-                    vm_disk_params = vm_config[setting]
-                    vm_disk_storage = vm_disk_params.split(':')[0]
-                    vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size']
-                    # if storage is different, move the disk
-                    if setting_storage != vm_disk_storage:
-                        postParams = {}
-                        postParams['disk'] = setting
-                        postParams['storage'] = setting_storage
-                        postParams['delete'] = 1
-                        node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format(
-                            vm_['host'], vmid), postParams)
-                        data = _parse_proxmox_upid(node, vm_)
-                        # wait until the disk has been moved
-                        if not wait_for_task(data['upid'], timeout=300):
-                            return {'Error': 'Unable to move disk {0}, command timed out'.format(
-                                setting)}
-                    # if storage is different, move the disk
-                    if setting_size != vm_disk_size:
-                        postParams = {}
-                        postParams['disk'] = setting
-                        postParams['size'] = setting_size
-                        query('put', 'nodes/{0}/qemu/{1}/resize'.format(
-                            vm_['host'], vmid), postParams)
-                else:
-                    postParams = {}
-                    postParams[setting] = vm_[setting]
-                    query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)
-
-        # net strings are a list of comma seperated settings. We need to merge the settings so that
-        # the setting in the profile only changes the settings it touches and the other settings
-        # are left alone. An example of why this is necessary is because the MAC address is set
-        # in here and generally you don't want to alter or have to know the MAC address of the new
-        # instance, but you may want to set the VLAN bridge for example
-        for setting_number in range(20):
-            setting = 'net{0}'.format(setting_number)
-            if setting in vm_:
-                data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
-
-                # Generate a dictionary of settings from the existing string
-                new_setting = {}
-                if setting in data:
-                    new_setting.update(_stringlist_to_dictionary(data[setting]))
-
-                # Merge the new settings (as a dictionary) into the existing dictionary to get the
-                # new merged settings
-                new_setting.update(_stringlist_to_dictionary(vm_[setting]))
-
-                # Convert the dictionary back into a string list
-                postParams = {setting: _dictionary_to_stringlist(new_setting)}
-                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)
-
-        for setting_number in range(20):
-            setting = 'ipconfig{0}'.format(setting_number)
-            if setting in vm_:
-                data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
-
-                # Generate a dictionary of settings from the existing string
-                new_setting = {}
-                if setting in data:
-                    new_setting.update(_stringlist_to_dictionary(data[setting]))
-
-                # Merge the new settings (as a dictionary) into the existing dictionary to get the
-                # new merged settings
-                if setting_number == 0 and 'ip_address' in vm_:
-                    if 'gw' in _stringlist_to_dictionary(vm_[setting]):
-                        new_setting.update(_stringlist_to_dictionary(
-                            'ip={0}/24,gw={1}'.format(
-                                vm_['ip_address'], _stringlist_to_dictionary(vm_[setting])['gw'])))
-                    else:
-                        new_setting.update(
-                            _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address'])))
-                else:
-                    new_setting.update(_stringlist_to_dictionary(vm_[setting]))
-
-                # Convert the dictionary back into a string list
-                postParams = {setting: _dictionary_to_stringlist(new_setting)}
-                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)
-
     # VM has been created. Starting..
     if not start(name, vmid, call='action'):
         log.error('Node %s (%s) failed to start!', name, vmid)
@@ -797,42 +585,6 @@ def create(vm_):
     if not wait_for_state(vmid, 'running'):
         return {'Error': 'Unable to start {0}, command timed out'.format(name)}
 
-    # For QEMU VMs, we can get the IP Address from qemu-agent
-    if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1:
-        def __find_agent_ip(vm_):
-            log.debug("Waiting for qemu-agent to start...")
-            endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid)
-            interfaces = query('get', endpoint)
-            # If we get a result from the agent, parse it
-            if 'result' in interfaces:
-                for interface in interfaces['result']:
-                    if_name = interface['name']
-                    # Only check ethernet type interfaces, as they are not returned in any order
-                    if if_name.startswith('eth') or if_name.startswith('ens'):
-                        for if_addr in interface['ip-addresses']:
-                            ip_addr = if_addr['ip-address']
-                            # Ensure interface has a valid IPv4 address
-                            if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None:
-                                return six.text_type(ip_addr)
-            raise SaltCloudExecutionFailure
-
-        # We have to wait for a bit for qemu-agent to start
-        try:
-            ip_address = __utils__['cloud.wait_for_fun'](
-                __find_agent_ip,
-                vm_=vm_
-            )
-        except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
-            try:
-                # If VM was created but we can't connect, destroy it.
-                destroy(vm_['name'])
-            except SaltCloudSystemExit:
-                pass
-            finally:
-                raise SaltCloudSystemExit(six.text_type(exc))
-
-        log.debug('Using IP address %s', ip_address)
-
     ssh_username = config.get_cloud_config_value(
         'ssh_username', vm_, __opts__, default='root'
     )
@@ -859,8 +611,7 @@ def __find_agent_ip(vm_):
         'event',
         'created instance',
         'salt/cloud/{0}/created'.format(vm_['name']),
-        args=__utils__['cloud.filter_event'](
-            'created', vm_, ['name', 'profile', 'provider', 'driver']),
+        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
         sock_dir=__opts__['sock_dir'],
     )
@@ -874,7 +625,7 @@ def _import_api():
     Load this json content into global variable "api"
     '''
     global api
-    full_url = 'https://{0}:{1}/pve-docs/api-viewer/apidoc.js'.format(url, port)
+    full_url = 'https://{0}:8006/pve-docs/api-viewer/apidoc.js'.format(url)
     returned_data = requests.get(full_url, verify=verify_ssl)
 
     re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE)
@@ -898,7 +649,7 @@ def _get_properties(path="", method="GET", forced_params=None):
     for elem in path_levels[:-1]:
         search_path += '/' + elem
         # Lookup for a dictionary with path = "requested path" in list" and return its children
-        sub = (item for item in sub if item["path"] == search_path).next()['children']
+        sub = next((item for item in sub if item["path"] == search_path))['children']
     # Get leaf element in path
     search_path += '/' + path_levels[-1]
     sub = next((item for item in sub if item["path"] == search_path))
@@ -958,8 +709,7 @@ def create_node(vm_, newid):
         newnode['ostemplate'] = vm_['image']
 
         # optional VZ settings
-        for prop in ['cpus', 'disk', 'ip_address', 'nameserver',
-                     'password', 'swap', 'poolid', 'storage']:
+        for prop in 'cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage':
             if prop in vm_:  # if the property is set, use it for the VM request
                 newnode[prop] = vm_[prop]
@@ -968,8 +718,8 @@ def create_node(vm_, newid):
         newnode['hostname'] = vm_['name']
         newnode['ostemplate'] = vm_['image']
 
-        static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory',
-                        'onboot', 'net0', 'password', 'nameserver', 'swap', 'storage', 'rootfs')
+        static_props = ('cpuunits', 'cpulimit', 'rootfs', 'cores', 'description', 'memory', 'onboot', 'net0',
+                        'password', 'nameserver', 'swap', 'storage', 'rootfs')
 
         for prop in _get_properties('/nodes/{node}/lxc',
                                     'POST',
                                     static_props):
@@ -997,8 +747,7 @@ def create_node(vm_, newid):
 
     elif vm_['technology'] == 'qemu':
         # optional Qemu settings
-        static_props = (
-            'acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0')
+        static_props = ('acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0')
 
         for prop in _get_properties('/nodes/{node}/qemu',
                                     'POST',
                                     static_props):
@@ -1025,18 +774,7 @@ def create_node(vm_, newid):
             if 'clone_' + prop in vm_:  # if the property is set, use it for the VM request
                 postParams[prop] = vm_['clone_' + prop]
 
-        if 'host' in vm_:
-            postParams['target'] = vm_['host']
-
-        try:
-            int(vm_['clone_from'])
-        except ValueError:
-            if ':' in vm_['clone_from']:
-                vmhost = vm_['clone_from'].split(':')[0]
-                vm_['clone_from'] = vm_['clone_from'].split(':')[1]
-
-        node = query('post', 'nodes/{0}/qemu/{1}/clone'.format(
-            vmhost, vm_['clone_from']), postParams)
+        node = query('post', 'nodes/{0}/qemu/{1}/clone'.format(vmhost, vm_['clone_from']), postParams)
     else:
         node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode)
 
     return _parse_proxmox_upid(node, vm_)
@@ -1119,28 +857,6 @@ def wait_for_state(vmid, state, timeout=300):
                   node['name'], node['status'], state)
 
 
-def wait_for_task(upid, timeout=300):
-    '''
-    Wait until a the task has been finished successfully
-    '''
-    start_time = time.time()
-    info = _lookup_proxmox_task(upid)
-    if not info:
-        log.error('wait_for_task: No task information '
-                  'retrieved based on given criteria.')
-        raise SaltCloudExecutionFailure
-
-    while True:
-        if 'status' in info and info['status'] == 'OK':
-            log.debug('Task has been finished!')
-            return True
-        time.sleep(3)  # Little more patience, we're not in a hurry
-        if time.time() - start_time > timeout:
-            log.debug('Timeout reached while waiting for task to be finished')
-            return False
-        info = _lookup_proxmox_task(upid)
-
-
 def destroy(name, call=None):
     '''
     Destroy a node.
@@ -1192,8 +908,7 @@ def destroy(name, call=None):
             transport=__opts__['transport']
         )
         if __opts__.get('update_cachedir', False) is True:
-            __utils__['cloud.delete_minion_cachedir'](
-                name, __active_provider_name__.split(':')[0], __opts__)
+            __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
 
         return {'Destroyed': '{0} was destroyed.'.format(name)}
@@ -1220,7 +935,7 @@ def set_vm_status(status, name=None, vmid=None):
     log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj)
     data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format(
-        vmobj['node'], vmobj['type'], vmobj['vmid'], status))
+                 vmobj['node'], vmobj['type'], vmobj['vmid'], status))
 
     result = _parse_proxmox_upid(data, vmobj)
@@ -1251,7 +966,7 @@ def get_vm_status(vmid=None, name=None):
         log.debug("VM_STATUS: Has desired info. Retrieving.. (%s)", vmobj['name'])
         data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format(
-            vmobj['node'], vmobj['type'], vmobj['vmid']))
+                     vmobj['node'], vmobj['type'], vmobj['vmid']))
         return data
 
     log.error('VM or requested status not found..')
diff --git a/salt/cloud/clouds/saltify.py b/salt/cloud/clouds/saltify.py
index adebdb0ac46c..10ead0c7adb7 100644
--- a/salt/cloud/clouds/saltify.py
+++ b/salt/cloud/clouds/saltify.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 '''
-.. _saltify-module:
+.. _`saltify-module`:
 
 Saltify Module
 ==============
@@ -248,10 +248,6 @@ def create(vm_):
     deploy_config = config.get_cloud_config_value(
         'deploy', vm_, __opts__, default=False)
 
-    # If ssh_host is not set, default to the minion name
-    if not config.get_cloud_config_value('ssh_host', vm_, __opts__, default=''):
-        vm_['ssh_host'] = vm_['name']
-
     if deploy_config:
         wol_mac = config.get_cloud_config_value(
             'wake_on_lan_mac', vm_, __opts__, default='')
@@ -304,7 +300,7 @@ def get_configured_provider():
 
 def _verify(vm_):
     '''
-    Verify credentials for an existing system
+    Verify credentials for an exsiting system
     '''
     log.info('Verifying credentials for %s', vm_['name'])
diff --git a/salt/cloud/clouds/scaleway.py b/salt/cloud/clouds/scaleway.py
index 2fbbed5783c9..ebe2636504fe 100644
--- a/salt/cloud/clouds/scaleway.py
+++ b/salt/cloud/clouds/scaleway.py
@@ -78,7 +78,7 @@ def avail_images(call=None):
             '-f or --function, or with the --list-images option'
         )
 
-    items = query(method='images', root='marketplace_root')
+    items = query(method='images')
     ret = {}
     for image in items['images']:
         ret[image['id']] = {}
@@ -171,14 +171,14 @@ def get_image(server_):
 def create_node(args):
     ''' Create a node.
     '''
-    node = query(method='servers', args=args, http_method='POST')
+    node = query(method='servers', args=args, http_method='post')
 
     action = query(
         method='servers',
         server_id=node['server']['id'],
         command='action',
         args={'action': 'poweron'},
-        http_method='POST'
+        http_method='post'
     )
     return node
@@ -314,21 +314,15 @@ def __query_node_data(server_name):
 
 def query(method='servers', server_id=None, command=None, args=None,
-          http_method='GET', root='api_root'):
+          http_method='get'):
     ''' Make a call to the Scaleway API.
     '''
-
-    if root == 'api_root':
-        default_url = 'https://cp-par1.scaleway.com'
-    else:
-        default_url = 'https://api-marketplace.scaleway.com'
-
     base_path = six.text_type(config.get_cloud_config_value(
-        root,
+        'api_root',
         get_configured_provider(),
         __opts__,
         search_global=False,
-        default=default_url
+        default='https://api.cloud.online.net'
     ))
 
     path = '{0}/{1}/'.format(base_path, method)
@@ -351,29 +345,25 @@ def query(method='servers', server_id=None, command=None, args=None,
 
     request = __utils__["http.query"](path,
                                       method=http_method,
                                       data=data,
-                                      status=True,
-                                      decode=True,
-                                      decode_type='json',
-                                      data_render=True,
-                                      data_renderer='json',
-                                      headers=True,
-                                      header_dict={'X-Auth-Token': token,
-                                                   'User-Agent': "salt-cloud",
-                                                   'Content-Type': 'application/json'})
-    if request['status'] > 299:
+                                      headers={'X-Auth-Token': token,
+                                               'User-Agent': "salt-cloud",
+                                               'Content-Type': 'application/json'})
+    if request.status_code > 299:
         raise SaltCloudSystemExit(
             'An error occurred while querying Scaleway. HTTP Code: {0}  '
             'Error: \'{1}\''.format(
-                request['status'],
-                request['error']
+                request.status_code,
+                request.text
             )
         )
 
+    log.debug(request.url)
+
     # success without data
-    if request['status'] == 204:
+    if request.status_code == 204:
         return True
 
-    return salt.utils.json.loads(request['body'])
+    return request.json()
@@ -441,7 +431,7 @@ def destroy(name, call=None):
     data = show_instance(name, call='action')
     node = query(
         method='servers', server_id=data['id'], command='action',
-        args={'action': 'terminate'}, http_method='POST'
+        args={'action': 'terminate'}, http_method='post'
     )
 
     __utils__['cloud.fire_event'](
diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py
index 3b32113a3e06..1d51674df1ed 100644
--- a/salt/cloud/clouds/vmware.py
+++ b/salt/cloud/clouds/vmware.py
@@ -126,12 +126,10 @@
 
 # Import salt libs
 import salt.utils.cloud
-import salt.utils.master
 import salt.utils.network
 import salt.utils.stringutils
 import salt.utils.xmlutil
 import salt.utils.vmware
-from salt._compat import ipaddress
 from salt.exceptions import SaltCloudSystemExit
 
 # Import salt cloud libs
@@ -992,69 +990,43 @@ def _wait_for_vmware_tools(vm_ref, max_wait):
 
 def _valid_ip(ip_address):
     '''
-    Check if the IP address is valid and routable
+    Check if the IP address is valid
     Return either True or False
     '''
-    try:
-        address = ipaddress.IPv4Address(ip_address)
-    except ipaddress.AddressValueError:
-        return False
-
-    if address.is_unspecified or \
-       address.is_loopback or \
-       address.is_link_local or \
-       address.is_multicast or \
-       address.is_reserved:
+    # Make sure IP has four octets
+    octets = ip_address.split('.')
+    if len(octets) != 4:
         return False
 
-    return True
-
+    # convert octet from string to int
+    for i, octet in enumerate(octets):
 
-def _valid_ip6(ip_address):
-    '''
-    Check if the IPv6 address is valid and routable
-    Return either True or False
-    '''
+        try:
+            octets[i] = int(octet)
+        except ValueError:
+            # couldn't convert octet to an integer
+            return False
 
-    # Validate IPv6 address
-    try:
-        address = ipaddress.IPv6Address(ip_address)
-    except ipaddress.AddressValueError:
-        return False
+    # map variables to elements of octets list
+    first_octet, second_octet, third_octet, fourth_octet = octets
 
-    if address.is_unspecified or \
-       address.is_loopback or \
-       address.is_link_local or \
-       address.is_multicast or \
-       address.is_reserved:
+    # Check first_octet meets conditions
+    if first_octet < 1 or first_octet > 223 or first_octet == 127:
         return False
 
-    if address.ipv4_mapped is not None:
+    # Check 169.254.X.X condition
+    if first_octet == 169 and second_octet == 254:
         return False
 
+    # Check 2nd - 4th octets
+    for octet in (second_octet, third_octet, fourth_octet):
+        if (octet < 0) or (octet > 255):
+            return False
+
+    # Passed all of the checks
     return True
 
 
-def _master_supports_ipv6():
-    '''
-    Check if the salt master has a valid and
-    routable IPv6 address available
-    '''
-    master_fqdn = salt.utils.network.get_fqhostname()
-    pillar_util = salt.utils.master.MasterPillarUtil(master_fqdn,
-                                                     tgt_type='glob',
-                                                     use_cached_grains=False,
-                                                     grains_fallback=False,
-                                                     opts=__opts__)
-    grains_data = pillar_util.get_minion_grains()
-    ipv6_addresses = grains_data[master_fqdn]['ipv6']
-    for address in ipv6_addresses:
-        if _valid_ip6(address):
-            return True
-    return False
-
-
 def _wait_for_ip(vm_ref, max_wait):
     max_wait_vmware_tools = max_wait
     max_wait_ip = max_wait
@@ -1069,43 +1041,34 @@ def _wait_for_ip(vm_ref, max_wait):
         if isinstance(resolved_ips, list) and resolved_ips:
             return resolved_ips[0]
         return False
-    master_supports_ipv6 = _master_supports_ipv6()
-    log.info(
-        "[ %s ] Master has IPv6 support: %s",
-        vm_ref.name, master_supports_ipv6
-    )
     time_counter = 0
     starttime = time.time()
-    ipv4_address = None
     while time_counter < max_wait_ip:
         if time_counter % 5 == 0:
             log.info(
-                "[ %s ] Waiting to retrieve IPv4/6 information [%s s]",
+                "[ %s ] Waiting to retrieve IPv4 information [%s s]",
                 vm_ref.name, time_counter
             )
+        if vm_ref.summary.guest.ipAddress and _valid_ip(vm_ref.summary.guest.ipAddress):
+            log.info(
+                "[ %s ] Successfully retrieved IPv4 information in %s seconds",
+                vm_ref.name, time_counter
+            )
+            return vm_ref.summary.guest.ipAddress
         for net in vm_ref.guest.net:
             if net.ipConfig.ipAddress:
                 for current_ip in net.ipConfig.ipAddress:
-                    if master_supports_ipv6 and _valid_ip6(current_ip.ipAddress):
+                    if _valid_ip(current_ip.ipAddress):
                         log.info(
-                            "[ %s ] Successfully retrieved IPv6 information "
+                            "[ %s ] Successfully retrieved IPv4 information "
                             "in %s seconds", vm_ref.name, time_counter
                         )
                         return current_ip.ipAddress
-                    if _valid_ip(current_ip.ipAddress) and not ipv4_address:
-                        # Delay return in case we have a valid IPv6 available
-                        ipv4_address = current_ip
-        if ipv4_address:
-            log.info(
-                "[ %s ] Successfully retrieved IPv4 information "
-                "in %s seconds", vm_ref.name, time_counter
-            )
-            return ipv4_address.ipAddress
         time.sleep(1.0 - ((time.time() - starttime) % 1.0))
         time_counter += 1
     log.warning(
-        "[ %s ] Timeout Reached. Unable to retrieve IPv4/6 information after "
+        "[ %s ] Timeout Reached. Unable to retrieve IPv4 information after "
         "waiting for %s seconds", vm_ref.name, max_wait_ip
     )
     return False
@@ -1642,52 +1605,6 @@ def list_datastores(kwargs=None, call=None):
     return {'Datastores': salt.utils.vmware.list_datastores(_get_si())}
 
 
-def list_datastores_full(kwargs=None, call=None):
-    '''
-    List all the datastores for this VMware environment, with extra information
-
-    CLI Example:
-
-    .. code-block:: bash
-
-        salt-cloud -f list_datastores_full my-vmware-config
-    '''
-    if call != 'function':
-        raise SaltCloudSystemExit(
-            'The list_datastores_full function must be called with '
-            '-f or --function.'
-        )
-
-    return {'Datastores': salt.utils.vmware.list_datastores_full(_get_si())}
-
-
-def list_datastore_full(kwargs=None, call=None, datastore=None):
-    '''
-    Returns a dictionary with basic information for the given datastore
-
-    CLI Example:
-
-    .. code-block:: bash
-
-        salt-cloud -f list_datastore_full my-vmware-config datastore=datastore-name
-    '''
-    if call != 'function':
-        raise SaltCloudSystemExit(
-            'The list_datastore_full function must be called with '
-            '-f or --function.'
- ) - - if kwargs: - datastore = kwargs.get('datastore', None) - - if not datastore: - raise SaltCloudSystemExit( - 'The list_datastore_full function requires a datastore' - ) - - return {datastore: salt.utils.vmware.list_datastore_full(_get_si(), datastore)} - - def list_hosts(kwargs=None, call=None): ''' List all the hosts for this VMware environment @@ -2633,33 +2550,11 @@ def create(vm_): win_run_once = config.get_cloud_config_value( 'win_run_once', vm_, __opts__, search_global=False, default=None ) - win_ad_domain = config.get_cloud_config_value( - 'win_ad_domain', vm_, __opts__, search_global=False, default='' - ) - win_ad_user = config.get_cloud_config_value( - 'win_ad_user', vm_, __opts__, search_global=False, default='' - ) - win_ad_password = config.get_cloud_config_value( - 'win_ad_password', vm_, __opts__, search_global=False, default='' - ) - win_autologon = config.get_cloud_config_value( - 'win_autologon', vm_, __opts__, search_global=False, default=True - ) - timezone = config.get_cloud_config_value( - 'timezone', vm_, __opts__, search_global=False, default='' - ) - hw_clock_utc = config.get_cloud_config_value( - 'hw_clock_utc', vm_, __opts__, search_global=False, default='' - ) - clonefrom_datacenter = config.get_cloud_config_value( - 'clonefrom_datacenter', vm_, __opts__, search_global=False, default=datacenter - ) # Get service instance object si = _get_si() container_ref = None - clonefrom_datacenter_ref = None # If datacenter is specified, set the container reference to start search from it instead if datacenter: @@ -2675,22 +2570,13 @@ def create(vm_): datacenter ) container_ref = datacenter_ref if datacenter_ref else None - clonefrom_container_ref = datacenter_ref if datacenter_ref else None - # allow specifying a different datacenter that the template lives in - if clonefrom_datacenter: - clonefrom_datacenter_ref = salt.utils.vmware.get_mor_by_property( - si, - vim.Datacenter, - clonefrom_datacenter - ) - clonefrom_container_ref = clonefrom_datacenter_ref if clonefrom_datacenter_ref else None # Clone VM/template from specified VM/template object_ref = salt.utils.vmware.get_mor_by_property( si, vim.VirtualMachine, vm_['clonefrom'], - container_ref=clonefrom_container_ref + container_ref=container_ref ) if object_ref: clone_type = "template" if object_ref.config.template else "vm" @@ -2928,23 +2814,14 @@ def create(vm_): identity = vim.vm.customization.LinuxPrep() identity.hostName = vim.vm.customization.FixedName(name=host_name) identity.domain = domain_name - if timezone: - identity.timeZone = timezone - if isinstance(hw_clock_utc, bool): - identity.hwClockUTC = hw_clock_utc else: identity = vim.vm.customization.Sysprep() identity.guiUnattended = vim.vm.customization.GuiUnattended() - identity.guiUnattended.autoLogon = win_autologon - if win_autologon: - identity.guiUnattended.autoLogonCount = 1 - else: - identity.guiUnattended.autoLogonCount = 0 + identity.guiUnattended.autoLogon = True + identity.guiUnattended.autoLogonCount = 1 identity.guiUnattended.password = vim.vm.customization.Password() identity.guiUnattended.password.value = win_password identity.guiUnattended.password.plainText = plain_text - if timezone: - identity.guiUnattended.timeZone = timezone if win_run_once: identity.guiRunOnce = vim.vm.customization.GuiRunOnce() identity.guiRunOnce.commandList = win_run_once @@ -2954,12 +2831,6 @@ def create(vm_): identity.userData.computerName = vim.vm.customization.FixedName() identity.userData.computerName.name = host_name identity.identification = 
vim.vm.customization.Identification() - if win_ad_domain and win_ad_user and win_ad_password: - identity.identification.joinDomain = win_ad_domain - identity.identification.domainAdmin = win_ad_user - identity.identification.domainAdminPassword = vim.vm.customization.Password() - identity.identification.domainAdminPassword.value = win_ad_password - identity.identification.domainAdminPassword.plainText = plain_text custom_spec = vim.vm.customization.Specification( globalIPSettings=global_ip, identity=identity, @@ -4466,7 +4337,7 @@ def reboot_host(kwargs=None, call=None): 'Specified host system does not support reboot.' ) - if not host_ref.runtime.inMaintenanceMode and not force: + if not host_ref.runtime.inMaintenanceMode: raise SaltCloudSystemExit( 'Specified host system is not in maintenance mode. Specify force=True to ' 'force reboot even if there are virtual machines running or other operations ' @@ -4548,73 +4419,3 @@ def create_datastore_cluster(kwargs=None, call=None): return False return {datastore_cluster_name: 'created'} - - -def shutdown_host(kwargs=None, call=None): - ''' - Shut down the specified host system in this VMware environment - - .. note:: - - If the host system is not in maintenance mode, it will not be shut down. If you - want to shut down the host system regardless of whether it is in maintenance mode, - set ``force=True``. Default is ``force=False``. - - CLI Example: - - .. code-block:: bash - - salt-cloud -f shutdown_host my-vmware-config host="myHostSystemName" [force=True] - ''' - if call != 'function': - raise SaltCloudSystemExit( - 'The shutdown_host function must be called with ' - '-f or --function.' - ) - - host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None - force = _str_to_bool(kwargs.get('force')) if kwargs and 'force' in kwargs else False - - if not host_name: - raise SaltCloudSystemExit( - 'You must specify name of the host system.' - ) - - # Get the service instance - si = _get_si() - - host_ref = salt.utils.vmware.get_mor_by_property(si, vim.HostSystem, host_name) - if not host_ref: - raise SaltCloudSystemExit( - 'Specified host system does not exist.' - ) - - if host_ref.runtime.connectionState == 'notResponding': - raise SaltCloudSystemExit( - 'Specified host system cannot be shut down in it\'s current state (not responding).' - ) - - if not host_ref.capability.rebootSupported: - raise SaltCloudSystemExit( - 'Specified host system does not support shutdown.' - ) - - if not host_ref.runtime.inMaintenanceMode and not force: - raise SaltCloudSystemExit( - 'Specified host system is not in maintenance mode. Specify force=True to ' - 'force reboot even if there are virtual machines running or other operations ' - 'in progress.' 
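The shutdown_host function being removed here shared reboot_host's safety gate: never power-cycle a host outside maintenance mode unless the caller explicitly passes force=True. A rough sketch of that guard, with RuntimeError standing in for SaltCloudSystemExit and a pyVmomi-style host_ref assumed:

def ensure_host_safe_to_cycle(host_ref, force=False):
    # Mirrors the checks used by the host power functions in this driver.
    if host_ref.runtime.connectionState == 'notResponding':
        raise RuntimeError('Host is not responding; refusing to proceed.')
    if not host_ref.capability.rebootSupported:
        raise RuntimeError('Host does not support reboot/shutdown.')
    if not host_ref.runtime.inMaintenanceMode and not force:
        raise RuntimeError(
            'Host is not in maintenance mode; pass force=True to override.'
        )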
- ) - - try: - host_ref.ShutdownHost_Task(force) - except Exception as exc: - log.error( - 'Error while shutting down host %s: %s', - host_name, exc, - # Show the traceback if the debug logging level is enabled - exc_info_on_loglevel=logging.DEBUG - ) - return {host_name: 'failed to shut down host'} - - return {host_name: 'shut down host'} diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index 55571742d3c6..3f063e8c3b7f 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -5,23 +5,24 @@ # Import python libs from __future__ import absolute_import, print_function, unicode_literals -import codecs -import copy import errno -import hashlib -import logging -import multiprocessing import os -import pipes -import re +import stat +import codecs import shutil +import uuid +import hashlib import socket -import stat -import subprocess -import sys import tempfile import time +import subprocess +import multiprocessing +import logging +import pipes +import msgpack import traceback +import copy +import re import uuid @@ -46,9 +47,6 @@ try: import winrm from winrm.exceptions import WinRMTransportError - from winrm.exceptions import InvalidCredentialsError - from requests.exceptions import ReadTimeout - from requests.exceptions import ConnectionError HAS_WINRM = True except ImportError: @@ -66,7 +64,6 @@ import salt.utils.event import salt.utils.files import salt.utils.path -import salt.utils.msgpack import salt.utils.platform import salt.utils.stringutils import salt.utils.versions @@ -627,7 +624,7 @@ def bootstrap(vm_, opts=None): 'event', 'executing deploy script', 'salt/cloud/{0}/deploying'.format(vm_['name']), - args={'kwargs': salt.utils.data.simple_types_filter(event_kwargs)}, + args={'kwargs': event_kwargs}, sock_dir=opts.get( 'sock_dir', os.path.join(__opts__['sock_dir'], 'master')), @@ -1025,8 +1022,12 @@ def wait_for_psexecsvc(host, port, username, password, timeout=900): if time.time() - start > timeout: return False log.debug( - 'Retrying psexec connection to host %s on port %s (try %s)', - host, port, try_count + 'Retrying psexec connection to host {0} on port {1} ' + '(try {2})'.format( + host, + port, + try_count + ) ) time.sleep(1) @@ -1067,24 +1068,6 @@ def wait_for_winrm(host, port, username, password, timeout=900, use_ssl=True, ve log.debug('Return code was %s', r.status_code) except WinRMTransportError as exc: log.debug('Caught exception in wait_for_winrm: %s', exc) - except InvalidCredentialsError as exc: - log.error(( - 'Caught Invalid Credentials error in wait_for_winrm. ' - 'You may have an incorrect username/password, ' - 'the new minion\'s WinRM configuration is not correct, ' - 'the customization spec has not finished, ' - 'or we are waiting for an account rename policy to take effect. ' - 'Connection attempts will continue to be made until the WinRM timeout ' - 'has been exceeded.' - )) - except ReadTimeout as exc: - log.error('Caught Read Timeout while waiting for winrm.') - except ConnectionError as exc: - log.error(( - 'Caught Connection Error while waiting for winrm. ' - 'Connection attempts will continue to be made until the WinRM timeout ' - 'has been exceeded.' 
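Just above, wait_for_winrm's except clauses are trimmed back to WinRMTransportError only. The overall shape of that helper is a patient retry loop: attempt a connection, swallow transport-level failures, and give up once the timeout is exceeded. A simplified sketch, where connect() and its status_code contract are assumptions for illustration:

import time


def wait_for_endpoint(connect, timeout=900):
    start = time.time()
    while True:
        try:
            response = connect()
            if response.status_code == 200:
                return response
        except Exception:
            # The real helper only swallows specific transport errors;
            # a bare Exception just keeps the sketch short.
            pass
        if time.time() - start > timeout:
            return None
        time.sleep(1)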
- )) if time.time() - start > timeout: log.error('WinRM connection timed out: %s', timeout) @@ -1516,7 +1499,7 @@ def deploy_script(host, ) if sudo: comps = tmp_dir.lstrip('/').rstrip('/').split('/') - if comps: + if len(comps) > 0: if len(comps) > 1 or comps[0] != 'tmp': ret = root_cmd( 'chown {0} "{1}"'.format(username, tmp_dir), @@ -1998,22 +1981,25 @@ def fire_event(key, msg, tag, sock_dir, args=None, transport='zeromq'): ''' Fire deploy action ''' - with salt.utils.event.get_event('master', sock_dir, transport, listen=False) as event: - try: - event.fire_event(msg, tag) - except ValueError: - # We're using at least a 0.17.x version of salt - if isinstance(args, dict): - args[key] = msg - else: - args = {key: msg} - event.fire_event(args, tag) - finally: - event.destroy() + event = salt.utils.event.get_event( + 'master', + sock_dir, + transport, + listen=False) + + try: + event.fire_event(msg, tag) + except ValueError: + # We're using at least a 0.17.x version of salt + if isinstance(args, dict): + args[key] = msg + else: + args = {key: msg} + event.fire_event(args, tag) - # https://github.com/zeromq/pyzmq/issues/173#issuecomment-4037083 - # Assertion failed: get_load () == 0 (poller_base.cpp:32) - time.sleep(0.025) + # https://github.com/zeromq/pyzmq/issues/173#issuecomment-4037083 + # Assertion failed: get_load () == 0 (poller_base.cpp:32) + time.sleep(0.025) def _exec_ssh_cmd(cmd, error_msg=None, allow_failure=False, **kwargs): @@ -2086,7 +2072,7 @@ def scp_file(dest_path, contents=None, kwargs=None, local_file=None): os.close(tmpfd) except OSError as exc: if exc.errno != errno.EBADF: - six.reraise(*sys.exc_info()) + raise exc log.debug('Uploading %s to %s', dest_path, kwargs['hostname']) @@ -2157,7 +2143,7 @@ def scp_file(dest_path, contents=None, kwargs=None, local_file=None): os.remove(file_to_upload) except OSError as exc: if exc.errno != errno.ENOENT: - six.reraise(*sys.exc_info()) + raise exc return retcode @@ -2194,7 +2180,7 @@ def sftp_file(dest_path, contents=None, kwargs=None, local_file=None): os.close(tmpfd) except OSError as exc: if exc.errno != errno.EBADF: - six.reraise(*sys.exc_info()) + raise exc if local_file is not None: file_to_upload = local_file @@ -2259,7 +2245,7 @@ def sftp_file(dest_path, contents=None, kwargs=None, local_file=None): os.remove(file_to_upload) except OSError as exc: if exc.errno != errno.ENOENT: - six.reraise(*sys.exc_info()) + raise exc return retcode @@ -2304,8 +2290,6 @@ def winrm_cmd(session, command, flags, **kwargs): Wrapper for commands to be run against Windows boxes using WinRM. 
''' log.debug('Executing WinRM command: %s %s', command, flags) - # rebuild the session to ensure we haven't timed out - session.protocol.transport.build_session() r = session.run_cmd(command, flags) return r.status_code @@ -2394,19 +2378,19 @@ def check_auth(name, sock_dir=None, queue=None, timeout=300): This function is called from a multiprocess instance, to wait for a minion to become available to receive salt commands ''' - with salt.utils.event.SaltEvent('master', sock_dir, listen=True) as event: - starttime = time.mktime(time.localtime()) - newtimeout = timeout - log.debug('In check_auth, waiting for %s to become available', name) - while newtimeout > 0: - newtimeout = timeout - (time.mktime(time.localtime()) - starttime) - ret = event.get_event(full=True) - if ret is None: - continue - if ret['tag'] == 'salt/minion/{0}/start'.format(name): - queue.put(name) - newtimeout = 0 - log.debug('Minion %s is ready to receive commands', name) + event = salt.utils.event.SaltEvent('master', sock_dir, listen=True) + starttime = time.mktime(time.localtime()) + newtimeout = timeout + log.debug('In check_auth, waiting for %s to become available', name) + while newtimeout > 0: + newtimeout = timeout - (time.mktime(time.localtime()) - starttime) + ret = event.get_event(full=True) + if ret is None: + continue + if ret['tag'] == 'salt/minion/{0}/start'.format(name): + queue.put(name) + newtimeout = 0 + log.debug('Minion %s is ready to receive commands', name) def ip_to_int(ip): @@ -2650,9 +2634,7 @@ def cachedir_index_add(minion_id, profile, driver, provider, base=None): if os.path.exists(index_file): mode = 'rb' if six.PY3 else 'r' with salt.utils.files.fopen(index_file, mode) as fh_: - index = salt.utils.data.decode( - salt.utils.msgpack.msgpack.load( - fh_, encoding=MSGPACK_ENCODING)) + index = salt.utils.data.decode(msgpack.load(fh_, encoding=MSGPACK_ENCODING)) else: index = {} @@ -2669,7 +2651,7 @@ def cachedir_index_add(minion_id, profile, driver, provider, base=None): mode = 'wb' if six.PY3 else 'w' with salt.utils.files.fopen(index_file, mode) as fh_: - salt.utils.msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING) + msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING) unlock_file(index_file) @@ -2686,8 +2668,7 @@ def cachedir_index_del(minion_id, base=None): if os.path.exists(index_file): mode = 'rb' if six.PY3 else 'r' with salt.utils.files.fopen(index_file, mode) as fh_: - index = salt.utils.data.decode( - salt.utils.msgpack.load(fh_, encoding=MSGPACK_ENCODING)) + index = salt.utils.data.decode(msgpack.load(fh_, encoding=MSGPACK_ENCODING)) else: return @@ -2696,7 +2677,7 @@ def cachedir_index_del(minion_id, base=None): mode = 'wb' if six.PY3 else 'w' with salt.utils.files.fopen(index_file, mode) as fh_: - salt.utils.msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING) + msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING) unlock_file(index_file) @@ -2754,7 +2735,7 @@ def request_minion_cachedir( path = os.path.join(base, 'requested', fname) mode = 'wb' if six.PY3 else 'w' with salt.utils.files.fopen(path, mode) as fh_: - salt.utils.msgpack.dump(data, fh_, encoding=MSGPACK_ENCODING) + msgpack.dump(data, fh_, encoding=MSGPACK_ENCODING) def change_minion_cachedir( @@ -2786,13 +2767,12 @@ def change_minion_cachedir( path = os.path.join(base, cachedir, fname) with salt.utils.files.fopen(path, 'r') as fh_: - cache_data = salt.utils.data.decode( - salt.utils.msgpack.load(fh_, encoding=MSGPACK_ENCODING)) + cache_data = salt.utils.data.decode(msgpack.load(fh_, encoding=MSGPACK_ENCODING)) 
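Context for the surrounding msgpack hunks: the encoding=MSGPACK_ENCODING keyword only exists on older msgpack-python releases (it was deprecated in the 0.5.x line and removed in msgpack 1.0), which is why newer Salt routes these calls through salt.utils.msgpack. If you experiment against a modern msgpack directly, the equivalent round-trip looks roughly like this, with use_bin_type/raw as the current replacements:

import msgpack

# A minion cache entry similar to what these functions persist.
entry = {'id': 'web01', 'provider': 'ec2', 'profile': 'ec2-test'}

# Pack with use_bin_type=True so text and bytes stay distinct, then
# unpack with raw=False to get str values back instead of bytes.
packed = msgpack.packb(entry, use_bin_type=True)
assert msgpack.unpackb(packed, raw=False) == entry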
cache_data.update(data) with salt.utils.files.fopen(path, 'w') as fh_: - salt.utils.msgpack.dump(cache_data, fh_, encoding=MSGPACK_ENCODING) + msgpack.dump(cache_data, fh_, encoding=MSGPACK_ENCODING) def activate_minion_cachedir(minion_id, base=None): @@ -2866,8 +2846,7 @@ def list_cache_nodes_full(opts=None, provider=None, base=None): minion_id = fname[:-2] # strip '.p' from end of msgpack filename mode = 'rb' if six.PY3 else 'r' with salt.utils.files.fopen(fpath, mode) as fh_: - minions[driver][prov][minion_id] = salt.utils.data.decode( - salt.utils.msgpack.load(fh_, encoding=MSGPACK_ENCODING)) + minions[driver][prov][minion_id] = salt.utils.data.decode(msgpack.load(fh_, encoding=MSGPACK_ENCODING)) return minions @@ -3028,7 +3007,7 @@ def cache_node_list(nodes, provider, opts): path = os.path.join(prov_dir, '{0}.p'.format(node)) mode = 'wb' if six.PY3 else 'w' with salt.utils.files.fopen(path, mode) as fh_: - salt.utils.msgpack.dump(nodes[node], fh_, encoding=MSGPACK_ENCODING) + msgpack.dump(nodes[node], fh_, encoding=MSGPACK_ENCODING) def cache_node(node, provider, opts): @@ -3054,7 +3033,7 @@ def cache_node(node, provider, opts): path = os.path.join(prov_dir, '{0}.p'.format(node['name'])) mode = 'wb' if six.PY3 else 'w' with salt.utils.files.fopen(path, mode) as fh_: - salt.utils.msgpack.dump(node, fh_, encoding=MSGPACK_ENCODING) + msgpack.dump(node, fh_, encoding=MSGPACK_ENCODING) def missing_node_cache(prov_dir, node_list, provider, opts): @@ -3129,8 +3108,7 @@ def diff_node_cache(prov_dir, node, new_data, opts): with salt.utils.files.fopen(path, 'r') as fh_: try: - cache_data = salt.utils.data.decode( - salt.utils.msgpack.load(fh_, encoding=MSGPACK_ENCODING)) + cache_data = salt.utils.data.decode(msgpack.load(fh_, encoding=MSGPACK_ENCODING)) except ValueError: log.warning('Cache for %s was corrupt: Deleting', node) cache_data = {} diff --git a/salt/utils/verify.py b/salt/utils/verify.py index 5eb8481069c7..583c5910f83a 100644 --- a/salt/utils/verify.py +++ b/salt/utils/verify.py @@ -35,6 +35,7 @@ log = logging.getLogger(__name__) ROOT_DIR = 'c:\\salt' if salt.utils.platform.is_windows() else '/' +DEFAULT_SCHEMES = ['tcp://', 'udp://', 'file://'] def zmq_version(): @@ -146,6 +147,28 @@ def verify_socket(interface, pub_port, ret_port): return True +def verify_logs_filter(files): + to_verify = [] + for filename in files: + verify_file = True + for scheme in DEFAULT_SCHEMES: + if filename.startswith(scheme): + verify_file = False + break + if verify_file: + to_verify.append(filename) + return to_verify + + +def verify_log_files(files, user): + ''' + Verify the log files exist and are owned by the named user. Filenames that + begin with tcp:// and udp:// will be filtered out. 
Filenames that begin + with file:// are handled correctly + ''' + return verify_files(verify_logs_filter(files), user) + + def verify_files(files, user): ''' Verify that the named files exist and are owned by the named user diff --git a/tests/integration/cloud/clouds/test_digitalocean.py b/tests/integration/cloud/clouds/test_digitalocean.py index d9069dfb72c4..688596f2b7f8 100644 --- a/tests/integration/cloud/clouds/test_digitalocean.py +++ b/tests/integration/cloud/clouds/test_digitalocean.py @@ -5,71 +5,30 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals -import os +import base64 +import hashlib +from Crypto.PublicKey import RSA # Import Salt Testing Libs -from tests.support.case import ShellCase -from tests.support.runtests import RUNTIME_VARS -from tests.support.helpers import expensiveTest, generate_random_name +from tests.integration.cloud.helpers.cloud_test_base import CloudTest, TIMEOUT # Import Salt Libs -from salt.config import cloud_providers_config +from salt.ext.six.moves import range +import salt.utils.stringutils -# Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'digitalocean' - - -@expensiveTest -class DigitalOceanTest(ShellCase): +class DigitalOceanTest(CloudTest): ''' Integration tests for the DigitalOcean cloud provider in Salt-Cloud ''' - - def setUp(self): - ''' - Sets up the test requirements - ''' - super(DigitalOceanTest, self).setUp() - - # check if appropriate cloud provider and profile files are present - profile_str = 'digitalocean-config' - providers = self.run_cloud('--list-providers') - if profile_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf files ' - 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' - .format(PROVIDER_NAME) - ) - - # check if personal access token, ssh_key_file, and ssh_key_names are present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - - personal_token = config[profile_str][PROVIDER_NAME]['personal_access_token'] - ssh_file = config[profile_str][PROVIDER_NAME]['ssh_key_file'] - ssh_name = config[profile_str][PROVIDER_NAME]['ssh_key_name'] - - if personal_token == '' or ssh_file == '' or ssh_name == '': - self.skipTest( - 'A personal access token, an ssh key file, and an ssh key name ' - 'must be provided to run these tests. 
Check ' - 'tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(PROVIDER_NAME) - ) + PROVIDER = 'digitalocean' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('personal_access_token', 'ssh_key_file', 'ssh_key_name') def test_list_images(self): ''' Tests the return of running the --list-images command for digitalocean ''' - image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME)) + image_list = self.run_cloud('--list-images {0}'.format(self.PROVIDER)) self.assertIn( '14.04.5 x64', [i.strip() for i in image_list] @@ -79,7 +38,7 @@ def test_list_locations(self): ''' Tests the return of running the --list-locations command for digitalocean ''' - _list_locations = self.run_cloud('--list-locations {0}'.format(PROVIDER_NAME)) + _list_locations = self.run_cloud('--list-locations {0}'.format(self.PROVIDER)) self.assertIn( 'San Francisco 2', [i.strip() for i in _list_locations] @@ -89,7 +48,7 @@ def test_list_sizes(self): ''' Tests the return of running the --list-sizes command for digitalocean ''' - _list_sizes = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME)) + _list_sizes = self.run_cloud('--list-sizes {0}'.format(self.PROVIDER)) self.assertIn( '16gb', [i.strip() for i in _list_sizes] @@ -99,20 +58,26 @@ def test_key_management(self): ''' Test key management ''' - pub = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example' - finger_print = '3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa' - - _key = self.run_cloud('-f create_key {0} name="MyPubKey" public_key="{1}"'.format(PROVIDER_NAME, pub)) + do_key_name = self.instance_name + '-key' - # Upload public key - self.assertIn( - finger_print, - [i.strip() for i in _key] - ) + # generate key and fingerprint + ssh_key = RSA.generate(4096) + pub = salt.utils.stringutils.to_str(ssh_key.publickey().exportKey("OpenSSH")) + key_hex = hashlib.md5(base64.b64decode(pub.strip().split()[1].encode())).hexdigest() + finger_print = ':'.join([key_hex[x:x+2] for x in range(0, len(key_hex), 2)]) try: + _key = self.run_cloud('-f create_key {0} name="{1}" public_key="{2}"'.format(self.PROVIDER, + do_key_name, pub)) + + # Upload public key + self.assertIn( + finger_print, + [i.strip() for i in _key] + ) + # List all keys - list_keypairs = self.run_cloud('-f list_keypairs {0}'.format(PROVIDER_NAME)) + list_keypairs = self.run_cloud('-f list_keypairs {0}'.format(self.PROVIDER)) self.assertIn( finger_print, @@ -120,45 +85,25 @@ def test_key_management(self): ) # List key - show_keypair = self.run_cloud('-f show_keypair {0} keyname={1}'.format(PROVIDER_NAME, 'MyPubKey')) - + show_keypair = self.run_cloud('-f show_keypair {0} keyname={1}'.format(self.PROVIDER, do_key_name)) self.assertIn( finger_print, [i.strip() for i in show_keypair] ) except AssertionError: # Delete the public key if the above assertions fail - self.run_cloud('-f remove_key {0} id={1}'.format(PROVIDER_NAME, finger_print)) + self.run_cloud('-f remove_key {0} id={1}'.format(self.PROVIDER, finger_print)) raise - - # Delete public key - self.assertTrue(self.run_cloud('-f remove_key {0} id={1}'.format(PROVIDER_NAME, finger_print))) + finally: + # Delete public key + self.assertTrue(self.run_cloud('-f remove_key {0} id={1}'.format(self.PROVIDER, finger_print))) def test_instance(self): ''' Test creating an instance on DigitalOcean ''' # check if instance with salt installed returned - try: - self.assertIn( - INSTANCE_NAME, - [i.strip() for i in self.run_cloud('-p digitalocean-test 
{0}'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) - raise - - # delete the instance - try: - self.assertIn( - 'True', - [i.strip() for i in self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - raise + ret_str = self.run_cloud('-p digitalocean-test {0}'.format(self.instance_name), timeout=TIMEOUT) + self.assertInstanceExists(ret_str) - # Final clean-up of created instance, in case something went wrong. - # This was originally in a tearDown function, but that didn't make sense - # To run this for each test when not all tests create instances. - if INSTANCE_NAME in [i.strip() for i in self.run_cloud('--query')]: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) + self.assertDestroyInstance() diff --git a/tests/integration/cloud/clouds/test_dimensiondata.py b/tests/integration/cloud/clouds/test_dimensiondata.py index aa3ff75a09c6..3b8f6f8fe5cd 100644 --- a/tests/integration/cloud/clouds/test_dimensiondata.py +++ b/tests/integration/cloud/clouds/test_dimensiondata.py @@ -5,84 +5,23 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals -import os -import random -import string # Import Salt Testing Libs -from tests.support.case import ShellCase -from tests.support.helpers import expensiveTest -from tests.support.runtests import RUNTIME_VARS +from tests.integration.cloud.helpers.cloud_test_base import CloudTest, TIMEOUT -# Import Salt Libs -from salt.config import cloud_providers_config -from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin - -def _random_name(size=6): - ''' - Generates a random cloud instance name - ''' - return 'cloud-test-' + ''.join( - random.choice(string.ascii_lowercase + string.digits) - for x in range(size) - ) - - -# Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = _random_name() -PROVIDER_NAME = 'dimensiondata' - - -@expensiveTest -class DimensionDataTest(ShellCase): +class DimensionDataTest(CloudTest): ''' Integration tests for the Dimension Data cloud provider in Salt-Cloud ''' - - def setUp(self): - ''' - Sets up the test requirements - ''' - super(DimensionDataTest, self).setUp() - - # check if appropriate cloud provider and profile files are present - profile_str = 'dimensiondata-config' - providers = self.run_cloud('--list-providers') - if profile_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf files ' - 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' - .format(PROVIDER_NAME) - ) - - # check if user_id, key, and region are present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - - user_id = config[profile_str][PROVIDER_NAME]['user_id'] - key = config[profile_str][PROVIDER_NAME]['key'] - region = config[profile_str][PROVIDER_NAME]['region'] - - if user_id == '' or key == '' or region == '': - self.skipTest( - 'A user Id, password, and a region ' - 'must be provided to run these tests. 
Check ' - 'tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(PROVIDER_NAME) - ) + PROVIDER = 'dimensiondata' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('key', 'region', 'user_id') def test_list_images(self): ''' Tests the return of running the --list-images command for the dimensiondata cloud provider ''' - image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME)) + image_list = self.run_cloud('--list-images {0}'.format(self.PROVIDER)) self.assertIn( 'Ubuntu 14.04 2 CPU', [i.strip() for i in image_list] @@ -92,7 +31,7 @@ def test_list_locations(self): ''' Tests the return of running the --list-locations command for the dimensiondata cloud provider ''' - _list_locations = self.run_cloud('--list-locations {0}'.format(PROVIDER_NAME)) + _list_locations = self.run_cloud('--list-locations {0}'.format(self.PROVIDER)) self.assertIn( 'Australia - Melbourne MCP2', [i.strip() for i in _list_locations] @@ -102,7 +41,7 @@ def test_list_sizes(self): ''' Tests the return of running the --list-sizes command for the dimensiondata cloud provider ''' - _list_sizes = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME)) + _list_sizes = self.run_cloud('--list-sizes {0}'.format(self.PROVIDER)) self.assertIn( 'default', [i.strip() for i in _list_sizes] @@ -113,26 +52,7 @@ def test_instance(self): Test creating an instance on Dimension Data's cloud ''' # check if instance with salt installed returned - try: - self.assertIn( - INSTANCE_NAME, - [i.strip() for i in self.run_cloud('-p dimensiondata-test {0}'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) - raise - - # delete the instance - try: - self.assertIn( - 'True', - [i.strip() for i in self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - raise + ret_val = self.run_cloud('-p dimensiondata-test {0}'.format(self.instance_name), timeout=TIMEOUT) + self.assertInstanceExists(ret_val) - # Final clean-up of created instance, in case something went wrong. - # This was originally in a tearDown function, but that didn't make sense - # To run this for each test when not all tests create instances. 
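Each converted test below reduces to the same create/verify/destroy cycle through the CloudTest helpers. A sketch of that shared pattern; the try/finally is a small hardening over the tests themselves, so cleanup runs even when the existence assertion fails:

def run_create_destroy_cycle(case, profile, timeout=500):
    # case is assumed to be a CloudTest-style test case exposing
    # run_cloud, instance_name, assertInstanceExists and
    # assertDestroyInstance (see cloud_test_base in this change).
    ret_str = case.run_cloud(
        '-p {0} {1}'.format(profile, case.instance_name), timeout=timeout
    )
    try:
        case.assertInstanceExists(ret_str)
    finally:
        case.assertDestroyInstance()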
-        if INSTANCE_NAME in [i.strip() for i in self.run_cloud('--query')]:
-            self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)
+        self.assertDestroyInstance()
diff --git a/tests/integration/cloud/clouds/test_ec2.py b/tests/integration/cloud/clouds/test_ec2.py
index 6393af3a683d..35196d1f7d53 100644
--- a/tests/integration/cloud/clouds/test_ec2.py
+++ b/tests/integration/cloud/clouds/test_ec2.py
@@ -9,116 +9,69 @@
 import yaml
 
 # Import Salt Libs
-from salt.config import cloud_providers_config
 import salt.utils.cloud
 import salt.utils.files
 import salt.utils.yaml
 
 # Import Salt Testing Libs
-from tests.support.case import ShellCase
-from tests.support.runtests import RUNTIME_VARS
-from tests.support.helpers import expensiveTest, generate_random_name
-from tests.support.unit import skipIf, WAR_ROOM_SKIP
+from tests.support.paths import FILES
+from tests.support.helpers import expensiveTest
+from tests.support.unit import skipIf
 from tests.support import win_installer
 
-
 # Create the cloud instance name to be used throughout the tests
-INSTANCE_NAME = generate_random_name('CLOUD-TEST-')
-PROVIDER_NAME = 'ec2'
+from tests.integration.cloud.helpers.cloud_test_base import CloudTest
+
 HAS_WINRM = salt.utils.cloud.HAS_WINRM and salt.utils.cloud.HAS_SMB
 
+# This test needs a longer timeout than other cloud tests
 TIMEOUT = 1200
 
 
-@skipIf(WAR_ROOM_SKIP, 'WAR ROOM TEMPORARY SKIP')
-@expensiveTest
-class EC2Test(ShellCase):
+class EC2Test(CloudTest):
     '''
     Integration tests for the EC2 cloud provider in Salt-Cloud
     '''
-
-    def _installer_name(self):
-        '''
-        Determine the downloaded installer name by searching the files
-        directory for the firt file that loosk like an installer.
-        '''
-        for path, dirs, files in os.walk(RUNTIME_VARS.FILES):
+    PROVIDER = 'ec2'
+    REQUIRED_PROVIDER_CONFIG_ITEMS = ('id', 'key', 'keyname', 'private_key', 'location')
+
+    @staticmethod
+    def __fetch_installer():
+        # Determine the downloaded installer name by searching the files
+        # directory for the first file that looks like an installer.
+        for path, dirs, files in os.walk(FILES):
             for file in files:
                 if file.startswith(win_installer.PREFIX):
                     return file
-            break
-        return
-
-    def _fetch_latest_installer(self):
-        '''
-        Download the latest Windows installer executable
-        '''
+        # If the installer wasn't found in the previous steps, download the latest Windows installer executable
         name = win_installer.latest_installer_name()
-        path = os.path.join(RUNTIME_VARS.FILES, name)
+        path = os.path.join(FILES, name)
         with salt.utils.files.fopen(path, 'wb') as fp:
             win_installer.download_and_verify(fp, name)
         return name
 
-    def _ensure_installer(self):
+    @property
+    def installer(self):
         '''
-        Make sure the testing environment has a Windows installer executbale.
+        Make sure the testing environment has a Windows installer executable.
         '''
-        name = self._installer_name()
-        if name:
-            return name
-        return self._fetch_latest_installer()
+        if not hasattr(self, '_installer'):
+            self._installer = self.__fetch_installer()
+        return self._installer
 
+    @expensiveTest
     def setUp(self):
         '''
         Sets up the test requirements
        '''
-        super(EC2Test, self).setUp()
-
-        # check if appropriate cloud provider and profile files are present
-        profile_str = 'ec2-config'
-        providers = self.run_cloud('--list-providers')
-
-        if profile_str + ':' not in providers:
-            self.skipTest(
-                'Configuration file for {0} was not found. Check {0}.conf files '
-                'in tests/integration/files/conf/cloud.*.d/ to run these tests.'
- .format(PROVIDER_NAME) - ) - - # check if id, key, keyname, securitygroup, private_key, location, - # and provider are present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - - id_ = config[profile_str][PROVIDER_NAME]['id'] - key = config[profile_str][PROVIDER_NAME]['key'] - key_name = config[profile_str][PROVIDER_NAME]['keyname'] - private_key = config[profile_str][PROVIDER_NAME]['private_key'] - location = config[profile_str][PROVIDER_NAME]['location'] - group_or_subnet = config[profile_str][PROVIDER_NAME].get('securitygroup', '') + group_or_subnet = self.provider_config.get('securitygroup') if not group_or_subnet: - group_or_subnet = config[profile_str][PROVIDER_NAME].get('subnetid', '') - - conf_items = [id_, key, key_name, private_key, location, group_or_subnet] - missing_conf_item = [] + group_or_subnet = self.provider_config.get('subnetid') - for item in conf_items: - if item == '': - missing_conf_item.append(item) + if not group_or_subnet: + self.skipTest('securitygroup or subnetid missing for {} config'.format(self.PROVIDER)) - if missing_conf_item: - self.skipTest( - 'An id, key, keyname, security group, private key, and location must ' - 'be provided to run these tests. One or more of these elements is ' - 'missing. Check tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(PROVIDER_NAME) - ) - self.INSTALLER = self._ensure_installer() + super(EC2Test, self).setUp() def override_profile_config(self, name, data): conf_path = os.path.join(self.config_dir, 'cloud.profiles.d', 'ec2.conf') @@ -134,83 +87,53 @@ def copy_file(self, name): configuration directory. The path to the file which is created will be returned. ''' - src = os.path.join(RUNTIME_VARS.FILES, name) + src = os.path.join(FILES, name) dst = os.path.join(self.config_dir, name) with salt.utils.files.fopen(src, 'rb') as sfp: with salt.utils.files.fopen(dst, 'wb') as dfp: dfp.write(sfp.read()) return dst - def _test_instance(self, profile='ec2-test', debug=False, timeout=TIMEOUT): + def _test_instance(self, profile, debug): ''' Tests creating and deleting an instance on EC2 (classic) ''' # create the instance - cmd = '-p {0}'.format(profile) + cmd = ['-p', profile] if debug: - cmd += ' -l debug' - cmd += ' {0}'.format(INSTANCE_NAME) - instance = self.run_cloud(cmd, timeout=timeout) - ret_str = '{0}:'.format(INSTANCE_NAME) + cmd.extend(['-l', 'debug']) + cmd.append(self.instance_name) + ret_val = self.run_cloud(' '.join(cmd), timeout=TIMEOUT) # check if instance returned with salt installed - try: - self.assertIn(ret_str, instance) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=timeout) - raise - - # delete the instance - delete = self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=timeout) - ret_str = ' shutting-down' + self.assertInstanceExists(ret_val) - # check if deletion was performed appropriately - try: - self.assertIn(ret_str, delete) - except AssertionError: - raise + self.assertDestroyInstance() def test_instance_rename(self): ''' Tests creating and renaming an instance on EC2 (classic) ''' # create the instance - rename = INSTANCE_NAME + '-rename' - instance = self.run_cloud('-p ec2-test {0} --no-deploy'.format(INSTANCE_NAME), timeout=TIMEOUT) - ret_str = '{0}:'.format(INSTANCE_NAME) - + ret_val = self.run_cloud('-p ec2-test {0} --no-deploy'.format(self.instance_name), timeout=TIMEOUT) # check if instance returned - try: - 
self.assertIn(ret_str, instance) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) - raise + self.assertInstanceExists(ret_val) - change_name = self.run_cloud('-a rename {0} newname={1} --assume-yes'.format(INSTANCE_NAME, rename), timeout=TIMEOUT) + changed_name = self.instance_name + '-changed' - check_rename = self.run_cloud('-a show_instance {0} --assume-yes'.format(rename), [rename]) - exp_results = [' {0}:'.format(rename), ' size:', - ' architecture:'] - try: - for result in exp_results: - self.assertIn(result, check_rename[0]) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) - raise + rename_result = self.run_cloud( + '-a rename {0} newname={1} --assume-yes'.format(self.instance_name, changed_name), timeout=TIMEOUT) + self.assertFalse(self._instance_exists(), 'Instance wasn\'t renamed: |\n{}'.format(rename_result)) + self.assertInstanceExists(instance_name=changed_name) - # delete the instance - delete = self.run_cloud('-d {0} --assume-yes'.format(rename), timeout=TIMEOUT) - ret_str = ' shutting-down' - - # check if deletion was performed appropriately - self.assertIn(ret_str, delete) + self.assertDestroyInstance(changed_name) def test_instance(self): ''' Tests creating and deleting an instance on EC2 (classic) ''' - self._test_instance('ec2-test') + self._test_instance('ec2-test', debug=False) def test_win2012r2_psexec(self): ''' @@ -219,15 +142,16 @@ def test_win2012r2_psexec(self): ''' # TODO: psexec calls hang and the test fails by timing out. The same # same calls succeed when run outside of the test environment. + # FIXME? Does this override need to be undone at the end of the test? self.override_profile_config( 'ec2-win2012r2-test', { 'use_winrm': False, 'userdata_file': self.copy_file('windows-firewall-winexe.ps1'), - 'win_installer': self.copy_file(self.INSTALLER), + 'win_installer': self.copy_file(self.installer), }, ) - self._test_instance('ec2-win2012r2-test', debug=True, timeout=TIMEOUT) + self._test_instance('ec2-win2012r2-test', debug=True) @skipIf(not HAS_WINRM, 'Skip when winrm dependencies are missing') def test_win2012r2_winrm(self): @@ -239,30 +163,30 @@ def test_win2012r2_winrm(self): 'ec2-win2012r2-test', { 'userdata_file': self.copy_file('windows-firewall.ps1'), - 'win_installer': self.copy_file(self.INSTALLER), + 'win_installer': self.copy_file(self.installer), 'winrm_ssl_verify': False, 'use_winrm': True, } ) - self._test_instance('ec2-win2012r2-test', debug=True, timeout=TIMEOUT) + self._test_instance('ec2-win2012r2-test', debug=True) def test_win2016_psexec(self): ''' Tests creating and deleting a Windows 2016 instance on EC2 using winrm (classic) ''' - # TODO: winexe calls hang and the test fails by timing out. The same + # TODO: winexe calls hang and the test fails by timing out. The # same calls succeed when run outside of the test environment. 
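The Windows profiles below are adjusted through override_profile_config, which rewrites cloud.profiles.d/ec2.conf inside the test's config directory. A minimal sketch of that kind of YAML merge, assuming a flat mapping of profile names to settings (the real helper may differ in detail):

import os

import yaml


def override_profile_config(config_dir, name, data):
    conf_path = os.path.join(config_dir, 'cloud.profiles.d', 'ec2.conf')
    with open(conf_path) as fh:
        conf = yaml.safe_load(fh) or {}
    # Merge the overrides into the named profile and write the file back.
    conf.setdefault(name, {}).update(data)
    with open(conf_path, 'w') as fh:
        yaml.safe_dump(conf, fh, default_flow_style=False)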
self.override_profile_config( 'ec2-win2016-test', { 'use_winrm': False, 'userdata_file': self.copy_file('windows-firewall-winexe.ps1'), - 'win_installer': self.copy_file(self.INSTALLER), + 'win_installer': self.copy_file(self.installer), }, ) - self._test_instance('ec2-win2016-test', debug=True, timeout=TIMEOUT) + self._test_instance('ec2-win2016-test', debug=True) @skipIf(not HAS_WINRM, 'Skip when winrm dependencies are missing') def test_win2016_winrm(self): @@ -274,21 +198,10 @@ def test_win2016_winrm(self): 'ec2-win2016-test', { 'userdata_file': self.copy_file('windows-firewall.ps1'), - 'win_installer': self.copy_file(self.INSTALLER), + 'win_installer': self.copy_file(self.installer), 'winrm_ssl_verify': False, 'use_winrm': True, } ) - self._test_instance('ec2-win2016-test', debug=True, timeout=TIMEOUT) - - def tearDown(self): - ''' - Clean up after tests - ''' - query = self.run_cloud('--query') - ret_str = ' {0}:'.format(INSTANCE_NAME) - - # if test instance is still present, delete it - if ret_str in query: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) + self._test_instance('ec2-win2016-test', debug=True) diff --git a/tests/integration/cloud/clouds/test_gce.py b/tests/integration/cloud/clouds/test_gce.py index 5d2fa9dfbf36..d447f8733306 100644 --- a/tests/integration/cloud/clouds/test_gce.py +++ b/tests/integration/cloud/clouds/test_gce.py @@ -6,142 +6,38 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals -import os - -# Import Salt Libs -from salt.config import cloud_providers_config # Import Salt Testing Libs -from tests.support.case import ShellCase -from tests.support.runtests import RUNTIME_VARS -from tests.support.helpers import expensiveTest, generate_random_name, flaky -from tests.support.unit import skipIf, WAR_ROOM_SKIP - -TIMEOUT = 500 +from tests.integration.cloud.helpers.cloud_test_base import TIMEOUT, CloudTest -@skipIf(WAR_ROOM_SKIP, 'WAR ROOM TEMPORARY SKIP') -@expensiveTest -class GCETest(ShellCase): +class GCETest(CloudTest): ''' Integration tests for the GCE cloud provider in Salt-Cloud ''' + PROVIDER = 'gce' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('project', 'service_account_email_address', 'service_account_private_key') - def setUp(self): - ''' - Sets up the test requirements - ''' - super(GCETest, self).setUp() - - # check if appropriate cloud provider and profile files are present - profile_str = 'gce-config:' - provider = 'gce' - providers = self.run_cloud('--list-providers') - # Create the cloud instance name to be used throughout the tests - self.INSTANCE_NAME = generate_random_name('cloud-test-').lower() - - if profile_str not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf files ' - 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' 
- .format(provider) - ) - - # check if project, service_account_email_address, service_account_private_key - # and provider are present - path = os.path.join(RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - provider + '.conf') - config = cloud_providers_config(path) - - project = config['gce-config']['gce']['project'] - service_account_email_address = config['gce-config']['gce']['service_account_email_address'] - service_account_private_key = config['gce-config']['gce']['service_account_private_key'] - - conf_items = [project, service_account_email_address, service_account_private_key] - missing_conf_item = [] - - for item in conf_items: - if item == '': - missing_conf_item.append(item) - - if missing_conf_item: - self.skipTest( - 'An project, service_account_email_address, service_account_private_key must ' - 'be provided to run these tests. One or more of these elements is ' - 'missing. Check tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(provider) - ) - - @flaky def test_instance(self): ''' Tests creating and deleting an instance on GCE ''' # create the instance - instance = self.run_cloud('-p gce-test {0}'.format(self.INSTANCE_NAME), timeout=TIMEOUT) - ret_str = '{0}:'.format(self.INSTANCE_NAME) + ret_str = self.run_cloud('-p gce-test {0}'.format(self.instance_name), timeout=TIMEOUT) # check if instance returned with salt installed - try: - self.assertIn(ret_str, instance) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(self.INSTANCE_NAME), timeout=TIMEOUT) - raise - - # delete the instance - delete = self.run_cloud('-d {0} --assume-yes'.format(self.INSTANCE_NAME), timeout=TIMEOUT) - # example response: ['gce-config:', '----------', ' gce:', '----------', 'cloud-test-dq4e6c:', 'True', ''] - delete_str = ''.join(delete) + self.assertInstanceExists(ret_str) + self.assertDestroyInstance() - # check if deletion was performed appropriately - try: - self.assertIn(self.INSTANCE_NAME, delete_str) - self.assertIn('True', delete_str) - except AssertionError: - raise - - @flaky def test_instance_extra(self): ''' Tests creating and deleting an instance on GCE ''' # create the instance - instance = self.run_cloud('-p gce-test-extra \ - {0}'.format(self.INSTANCE_NAME), - timeout=TIMEOUT) - ret_str = '{0}:'.format(self.INSTANCE_NAME) + ret_str = self.run_cloud('-p gce-test-extra {0}'.format(self.instance_name), timeout=TIMEOUT) # check if instance returned with salt installed - try: - self.assertIn(ret_str, instance) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(self.INSTANCE_NAME), timeout=TIMEOUT) - raise - - # delete the instance - delete = self.run_cloud('-d {0} --assume-yes'.format(self.INSTANCE_NAME), timeout=TIMEOUT) - # example response: ['gce-config:', '----------', ' gce:', '----------', 'cloud-test-dq4e6c:', 'True', ''] - delete_str = ''.join(delete) - - # check if deletion was performed appropriately - try: - self.assertIn(self.INSTANCE_NAME, delete_str) - self.assertIn('True', delete_str) - except AssertionError: - raise - - def tearDown(self): - ''' - Clean up after tests - ''' - # salt-cloud -a show_instance myinstance - query = self.run_cloud('--query') - ret_str = ' {0}:'.format(self.INSTANCE_NAME) - - # if test instance is still present, delete it - if ret_str in query: - self.run_cloud('-d {0} --assume-yes'.format(self.INSTANCE_NAME), timeout=TIMEOUT) + self.assertInstanceExists(ret_str) + self.assertDestroyInstance() diff --git a/tests/integration/cloud/clouds/test_gogrid.py 
b/tests/integration/cloud/clouds/test_gogrid.py index f5bd6c84aa56..f767d596705f 100644 --- a/tests/integration/cloud/clouds/test_gogrid.py +++ b/tests/integration/cloud/clouds/test_gogrid.py @@ -5,96 +5,28 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals -import os # Import Salt Testing Libs -from tests.support.case import ShellCase -from tests.support.runtests import RUNTIME_VARS -from tests.support.helpers import expensiveTest, generate_random_name from tests.support.unit import skipIf -# Import Salt Libs -from salt.config import cloud_providers_config - - # Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'gogrid' +from tests.integration.cloud.helpers.cloud_test_base import CloudTest, TIMEOUT @skipIf(True, 'waiting on bug report fixes from #13365') -@expensiveTest -class GoGridTest(ShellCase): +class GoGridTest(CloudTest): ''' Integration tests for the GoGrid cloud provider in Salt-Cloud ''' - - def setUp(self): - ''' - Sets up the test requirements - ''' - super(GoGridTest, self).setUp() - - # check if appropriate cloud provider and profile files are present - profile_str = 'gogrid-config' - providers = self.run_cloud('--list-providers') - if profile_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf files ' - 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' - .format(PROVIDER_NAME) - ) - - # check if client_key and api_key are present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - - api = config[profile_str][PROVIDER_NAME]['apikey'] - shared_secret = config[profile_str][PROVIDER_NAME]['sharedsecret'] - - if api == '' or shared_secret == '': - self.skipTest( - 'An api key and shared secret must be provided to run these tests. 
' - 'Check tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(PROVIDER_NAME) - ) + PROVIDER = 'gogrid' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('apikey', 'sharedsecret') def test_instance(self): ''' Test creating an instance on GoGrid ''' # check if instance with salt installed returned - try: - self.assertIn( - INSTANCE_NAME, - [i.strip() for i in self.run_cloud('-p gogrid-test {0}'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) - raise - - # delete the instance - try: - self.assertIn( - INSTANCE_NAME + ':', - [i.strip() for i in self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - raise - - def tearDown(self): - ''' - Clean up after tests - ''' - query = self.run_cloud('--query') - ret_str = ' {0}:'.format(INSTANCE_NAME) + ret_str = self.run_cloud('-p gogrid-test {0}'.format(self.instance_name), timeout=TIMEOUT) + self.assertInstanceExists(ret_str) - # if test instance is still present, delete it - if ret_str in query: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) + self.assertDestroyInstance() diff --git a/tests/integration/cloud/clouds/test_joyent.py b/tests/integration/cloud/clouds/test_joyent.py deleted file mode 100644 index 2a4f66bb40d6..000000000000 --- a/tests/integration/cloud/clouds/test_joyent.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding: utf-8 -*- -''' - :codeauthor: Nicole Thomas -''' - -# Import Python Libs -from __future__ import absolute_import, print_function, unicode_literals -import os - -# Import Salt Testing Libs -from tests.support.case import ShellCase -from tests.support.runtests import RUNTIME_VARS -from tests.support.helpers import expensiveTest, generate_random_name -from tests.support.unit import skipIf, WAR_ROOM_SKIP - -# Import Salt Libs -from salt.config import cloud_providers_config - -# Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'joyent' - - -@skipIf(WAR_ROOM_SKIP, 'WAR ROOM TEMPORARY SKIP') -@expensiveTest -class JoyentTest(ShellCase): - ''' - Integration tests for the Joyent cloud provider in Salt-Cloud - ''' - - def setUp(self): - ''' - Sets up the test requirements - ''' - super(JoyentTest, self).setUp() - - # check if appropriate cloud provider and profile files are present - profile_str = 'joyent-config' - providers = self.run_cloud('--list-providers') - if profile_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf files ' - 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' - .format(PROVIDER_NAME) - ) - - # check if user, password, private_key, and keyname are present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - - user = config[profile_str][PROVIDER_NAME]['user'] - password = config[profile_str][PROVIDER_NAME]['password'] - private_key = config[profile_str][PROVIDER_NAME]['private_key'] - keyname = config[profile_str][PROVIDER_NAME]['keyname'] - - if user == '' or password == '' or private_key == '' or keyname == '': - self.skipTest( - 'A user name, password, private_key file path, and a key name ' - 'must be provided to run these tests. 
Check ' - 'tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(PROVIDER_NAME) - ) - - def test_instance(self): - ''' - Test creating and deleting instance on Joyent - ''' - try: - self.assertIn( - INSTANCE_NAME, - [i.strip() for i in self.run_cloud('-p joyent-test {0}'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) - raise - - # delete the instance - try: - self.assertIn( - INSTANCE_NAME + ':', - [i.strip() for i in self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - raise - - def tearDown(self): - ''' - Clean up after tests - ''' - query = self.run_cloud('--query') - ret_str = ' {0}:'.format(INSTANCE_NAME) - - # if test instance is still present, delete it - if ret_str in query: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) diff --git a/tests/integration/cloud/clouds/test_linode.py b/tests/integration/cloud/clouds/test_linode.py index e96c8fb9a39c..4912c029e8ed 100644 --- a/tests/integration/cloud/clouds/test_linode.py +++ b/tests/integration/cloud/clouds/test_linode.py @@ -5,96 +5,25 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals -import os - -# Import Salt Testing Libs -from tests.support.case import ShellCase -from tests.support.runtests import RUNTIME_VARS -from tests.support.helpers import expensiveTest, generate_random_name -from tests.support.unit import skipIf, WAR_ROOM_SKIP - -# Import Salt Libs -from salt.config import cloud_providers_config - # Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'linode' +from tests.integration.cloud.helpers.cloud_test_base import CloudTest, TIMEOUT -@skipIf(WAR_ROOM_SKIP, 'WAR ROOM TEMPORARY SKIP') -@expensiveTest -class LinodeTest(ShellCase): +class LinodeTest(CloudTest): ''' Integration tests for the Linode cloud provider in Salt-Cloud ''' - def setUp(self): - ''' - Sets up the test requirements - ''' - super(LinodeTest, self).setUp() - - # check if appropriate cloud provider and profile files are present - profile_str = 'linode-config' - providers = self.run_cloud('--list-providers') - if profile_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf files ' - 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' - .format(PROVIDER_NAME) - ) - - # check if personal access token, ssh_key_file, and ssh_key_names are present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - - api = config[profile_str][PROVIDER_NAME]['apikey'] - password = config[profile_str][PROVIDER_NAME]['password'] - if api == '' or password == '': - self.skipTest( - 'An api key and password must be provided to run these tests. 
Check ' - 'tests/integration/files/conf/cloud.providers.d/{0}.conf'.format( - PROVIDER_NAME - ) - ) + PROVIDER = 'linode' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('apikey', 'password') def test_instance(self): ''' Test creating an instance on Linode ''' # check if instance with salt installed returned - try: - self.assertIn( - INSTANCE_NAME, - [i.strip() for i in self.run_cloud('-p linode-test {0}'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) - raise - - # delete the instance - try: - self.assertIn( - INSTANCE_NAME + ':', - [i.strip() for i in self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - raise - - def tearDown(self): - ''' - Clean up after tests - ''' - query = self.run_cloud('--query') - ret_str = ' {0}:'.format(INSTANCE_NAME) + ret_str = self.run_cloud('-p linode-test {0}'.format(self.instance_name), timeout=TIMEOUT) + self.assertInstanceExists(ret_str) - # if test instance is still present, delete it - if ret_str in query: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) + self.assertDestroyInstance() diff --git a/tests/integration/cloud/clouds/test_msazure.py b/tests/integration/cloud/clouds/test_msazure.py index c14f4fad79ba..0b8f21da352c 100644 --- a/tests/integration/cloud/clouds/test_msazure.py +++ b/tests/integration/cloud/clouds/test_msazure.py @@ -5,20 +5,15 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals -import os +import logging # Import Salt Testing Libs -from tests.support.case import ShellCase -from tests.support.runtests import RUNTIME_VARS -from tests.support.helpers import expensiveTest, generate_random_name -from tests.support.unit import skipIf, WAR_ROOM_SKIP +from tests.integration.cloud.helpers.cloud_test_base import CloudTest +from tests.support.unit import skipIf # Import Salt Libs -from salt.config import cloud_providers_config from salt.utils.versions import LooseVersion -TIMEOUT = 500 - try: import azure # pylint: disable=unused-import HAS_AZURE = True @@ -28,11 +23,10 @@ if HAS_AZURE and not hasattr(azure, '__version__'): import azure.common -# Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'azure' -PROFILE_NAME = 'azure-test' -REQUIRED_AZURE = '0.11.1' +log = logging.getLogger(__name__) + +TIMEOUT = 1000 +REQUIRED_AZURE = '1.1.0' def __has_required_azure(): @@ -44,113 +38,25 @@ def __has_required_azure(): version = LooseVersion(azure.__version__) else: version = LooseVersion(azure.common.__version__) - if LooseVersion(REQUIRED_AZURE) <= version: return True return False -@skipIf(WAR_ROOM_SKIP, 'WAR ROOM TEMPORARY SKIP') -@skipIf(HAS_AZURE is False, 'These tests require the Azure Python SDK to be installed.') -@skipIf(__has_required_azure() is False, 'The Azure Python SDK must be >= 0.11.1.') -@expensiveTest -class AzureTest(ShellCase): +@skipIf(not HAS_AZURE, 'These tests require the Azure Python SDK to be installed.') +@skipIf(not __has_required_azure(), 'The Azure Python SDK must be >= {}.'.format(REQUIRED_AZURE)) +class AzureTest(CloudTest): ''' Integration tests for the Azure cloud provider in Salt-Cloud ''' - - def setUp(self): - ''' - Sets up the test requirements - ''' - super(AzureTest, self).setUp() - - # check if appropriate cloud provider and profile files are present - provider_str = 'azure-config' - providers = 
self.run_cloud('--list-providers') - if provider_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf files ' - 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' - .format(PROVIDER_NAME) - ) - - # check if subscription_id and certificate_path are present in provider file - provider_config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - sub_id = provider_config[provider_str][PROVIDER_NAME]['subscription_id'] - cert_path = provider_config[provider_str][PROVIDER_NAME]['certificate_path'] - if sub_id == '' or cert_path == '': - self.skipTest( - 'A subscription_id and certificate_path must be provided to run ' - 'these tests. Check ' - 'tests/integration/files/conf/cloud.providers.d/{0}.conf'.format( - PROVIDER_NAME - ) - ) - - # check if ssh_username, ssh_password, and media_link are present - # in the azure configuration file - ssh_user = provider_config[provider_str][PROVIDER_NAME]['ssh_username'] - ssh_pass = provider_config[provider_str][PROVIDER_NAME]['ssh_password'] - media_link = provider_config[provider_str][PROVIDER_NAME]['media_link'] - - if ssh_user == '' or ssh_pass == '' or media_link == '': - self.skipTest( - 'An ssh_username, ssh_password, and media_link must be provided to run ' - 'these tests. One or more of these elements is missing. Check ' - 'tests/integration/files/conf/cloud.profiles.d/{0}.conf'.format( - PROVIDER_NAME - ) - ) + PROVIDER = 'azurearm' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('subscription_id',) def test_instance(self): ''' Test creating an instance on Azure ''' # check if instance with salt installed returned - try: - self.assertIn( - INSTANCE_NAME, - [i.strip() for i in self.run_cloud( - '-p {0} {1}'.format( - PROFILE_NAME, - INSTANCE_NAME - ), timeout=TIMEOUT - )] - ) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), - timeout=TIMEOUT) - raise - - # delete the instance - try: - self.assertIn( - INSTANCE_NAME + ':', - [i.strip() for i in self.run_cloud( - '-d {0} --assume-yes'.format( - INSTANCE_NAME - ), timeout=TIMEOUT - )] - ) - except AssertionError: - raise - - def tearDown(self): - ''' - Clean up after tests - ''' - query = self.run_cloud('--query') - ret_str = ' {0}:'.format(INSTANCE_NAME) - - # if test instance is still present, delete it - if ret_str in query: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), - timeout=TIMEOUT) + ret_val = self.run_cloud('-p azure-test {0}'.format(self.instance_name), timeout=TIMEOUT) + self.assertInstanceExists(ret_val) + self.assertDestroyInstance(timeout=TIMEOUT) diff --git a/tests/integration/cloud/clouds/test_oneandone.py b/tests/integration/cloud/clouds/test_oneandone.py index 32ffd50680c1..bd7779b5d367 100644 --- a/tests/integration/cloud/clouds/test_oneandone.py +++ b/tests/integration/cloud/clouds/test_oneandone.py @@ -5,16 +5,10 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals -import os # Import Salt Testing Libs -from tests.support.case import ShellCase -from tests.support.runtests import RUNTIME_VARS +from tests.integration.cloud.helpers.cloud_test_base import CloudTest, TIMEOUT from tests.support.unit import skipIf -from tests.support.helpers import expensiveTest, generate_random_name - -# Import Salt Libs -from salt.config import cloud_providers_config # Import Third-Party Libs try: @@ -24,59 +18,19 @@ HAS_ONEANDONE = False -# Create the cloud instance name 
to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'oneandone' -DRIVER_NAME = 'oneandone' - - @skipIf(HAS_ONEANDONE is False, 'salt-cloud requires >= 1and1 1.2.0') -@expensiveTest -class OneAndOneTest(ShellCase): +class OneAndOneTest(CloudTest): ''' Integration tests for the 1and1 cloud provider ''' - - def setUp(self): - ''' - Sets up the test requirements - ''' - super(OneAndOneTest, self).setUp() - - # check if appropriate cloud provider and profile files are present - profile_str = 'oneandone-config' - providers = self.run_cloud('--list-providers') - if profile_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf ' - 'files in tests/integration/files/conf/cloud.*.d/ to run ' - 'these tests.'.format(PROVIDER_NAME) - ) - - # check if api_token present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - - api_token = config[profile_str][DRIVER_NAME]['api_token'] - if api_token == '': - self.skipTest( - 'api_token must be provided to ' - 'run these tests. Check ' - 'tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(PROVIDER_NAME) - ) + PROVIDER = 'oneandone' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('api_token',) def test_list_images(self): ''' Tests the return of running the --list-images command for 1and1 ''' - image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME)) + image_list = self.run_cloud('--list-images {0}'.format(self.PROVIDER)) self.assertIn( 'coreOSimage', [i.strip() for i in image_list] @@ -87,35 +41,7 @@ def test_instance(self): Test creating an instance on 1and1 ''' # check if instance with salt installed returned - try: - self.assertIn( - INSTANCE_NAME, - [i.strip() for i in self.run_cloud( - '-p oneandone-test {0}'.format(INSTANCE_NAME), timeout=500 - )] - ) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) - raise - - # delete the instance - try: - self.assertIn( - INSTANCE_NAME + ':', - [i.strip() for i in self.run_cloud( - '-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500 - )] - ) - except AssertionError: - raise - - def tearDown(self): - ''' - Clean up after tests - ''' - query = self.run_cloud('--query') - ret = ' {0}:'.format(INSTANCE_NAME) + ret_str = self.run_cloud('-p oneandone-test {0}'.format(self.instance_name), timeout=TIMEOUT) + self.assertInstanceExists(ret_str) - # if test instance is still present, delete it - if ret in query: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) + self.assertDestroyInstance() diff --git a/tests/integration/cloud/clouds/test_openstack.py b/tests/integration/cloud/clouds/test_openstack.py index 3d2fb83735ea..b2b4e9ff1635 100644 --- a/tests/integration/cloud/clouds/test_openstack.py +++ b/tests/integration/cloud/clouds/test_openstack.py @@ -6,17 +6,15 @@ # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging -import os # Import Salt Testing libs -from tests.support.case import ModuleCase, ShellCase -from tests.support.runtests import RUNTIME_VARS +from tests.support.case import ModuleCase from tests.support.unit import skipIf -from tests.support.helpers import destructiveTest, expensiveTest, generate_random_name +from tests.support.helpers import destructiveTest from tests.support.mixins import SaltReturnAssertsMixin # Import Salt Libs -from salt.config import 
cloud_providers_config +from tests.integration.cloud.helpers.cloud_test_base import TIMEOUT, CloudTest log = logging.getLogger(__name__) @@ -31,22 +29,17 @@ # Import Third-Party Libs try: import shade # pylint: disable=unused-import + HAS_SHADE = True except ImportError: HAS_SHADE = False -# Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'openstack' -DRIVER_NAME = 'openstack' - @skipIf( not HAS_KEYSTONE, 'Please install keystoneclient and a keystone server before running' 'openstack integration tests.' ) -@expensiveTest class OpenstackTest(ModuleCase, SaltReturnAssertsMixin): ''' Validate the keystone state @@ -54,6 +47,7 @@ class OpenstackTest(ModuleCase, SaltReturnAssertsMixin): endpoint = 'http://localhost:35357/v2.0' token = 'administrator' + @destructiveTest def test_aaa_setup_keystone_endpoint(self): ret = self.run_state('keystone.service_present', name='keystone', @@ -176,78 +170,19 @@ def test_libcloud_auth_v3(self): @skipIf(not HAS_SHADE, 'openstack driver requires `shade`') -@expensiveTest -class RackspaceTest(ShellCase): +class RackspaceTest(CloudTest): ''' Integration tests for the Rackspace cloud provider using the Openstack driver ''' - - def setUp(self): - ''' - Sets up the test requirements - ''' - super(RackspaceTest, self).setUp() - - # check if appropriate cloud provider and profile files are present - profile_str = 'openstack-config' - providers = self.run_cloud('--list-providers') - if profile_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf files ' - 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' - .format(PROVIDER_NAME) - ) - - # check if personal access token, ssh_key_file, and ssh_key_names are present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - - region_name = config[profile_str][DRIVER_NAME].get('region_name') - auth = config[profile_str][DRIVER_NAME].get('auth') - cloud = config[profile_str][DRIVER_NAME].get('cloud') - if not region_name or not (auth or cloud): - self.skipTest( - 'A region_name and (auth or cloud) must be provided to run these ' - 'tests. 
Check tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(PROVIDER_NAME) - ) + PROVIDER = 'openstack' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('auth', 'cloud', 'region_name') def test_instance(self): ''' Test creating an instance on rackspace with the openstack driver ''' # check if instance with salt installed returned - try: - self.assertIn( - INSTANCE_NAME, - [i.strip() for i in self.run_cloud('-p rackspace-test {0}'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) - raise - - # delete the instance - try: - self.assertIn( - INSTANCE_NAME + ':', - [i.strip() for i in self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)] - ) - except AssertionError: - raise - - def tearDown(self): - ''' - Clean up after tests - ''' - query = self.run_cloud('--query') - ret = ' {0}:'.format(INSTANCE_NAME) + ret_val = self.run_cloud('-p rackspace-test {0}'.format(self.instance_name), timeout=TIMEOUT) + self.assertInstanceExists(ret_val) - # if test instance is still present, delete it - if ret in query: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) + self.assertDestroyInstance() diff --git a/tests/integration/cloud/clouds/test_profitbricks.py b/tests/integration/cloud/clouds/test_profitbricks.py index 997f07e94a69..9bd635164cd6 100644 --- a/tests/integration/cloud/clouds/test_profitbricks.py +++ b/tests/integration/cloud/clouds/test_profitbricks.py @@ -5,18 +5,13 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals -import os # Import Salt Testing Libs -from tests.support.case import ShellCase -from tests.support.runtests import RUNTIME_VARS from tests.support.unit import skipIf -from tests.support.helpers import expensiveTest, generate_random_name - -# Import Salt Libs -from salt.config import cloud_providers_config # Import Third-Party Libs +from tests.integration.cloud.helpers.cloud_test_base import TIMEOUT, CloudTest + try: # pylint: disable=unused-import from profitbricks.client import ProfitBricksService @@ -24,62 +19,31 @@ except ImportError: HAS_PROFITBRICKS = False -# Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'profitbricks' -DRIVER_NAME = 'profitbricks' - @skipIf(HAS_PROFITBRICKS is False, 'salt-cloud requires >= profitbricks 4.1.0') -@expensiveTest -class ProfitBricksTest(ShellCase): +class ProfitBricksTest(CloudTest): ''' Integration tests for the ProfitBricks cloud provider ''' + PROVIDER = 'profitbricks' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('username', 'password', 'datacenter_id') def setUp(self): - ''' - Sets up the test requirements - ''' super(ProfitBricksTest, self).setUp() + username = self.provider_config.get('username') + password = self.provider_config.get('password') - # check if appropriate cloud provider and profile files are present - profile_str = 'profitbricks-config' - providers = self.run_cloud('--list-providers') - if profile_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. 
Check {0}.conf ' - 'files in tests/integration/files/conf/cloud.*.d/ to run ' - 'these tests.'.format(PROVIDER_NAME) - ) - - # check if credentials and datacenter_id present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) + # A default username and password must be hard-coded as defaults as per issue #46265 + # If they are 'foo' and 'bar' it is the same as not being set - username = config[profile_str][DRIVER_NAME]['username'] - password = config[profile_str][DRIVER_NAME]['password'] - datacenter_id = config[profile_str][DRIVER_NAME]['datacenter_id'] - self.datacenter_id = datacenter_id - if username in ('' or 'foo') or password in ('' or 'bar') or datacenter_id == '': - self.skipTest( - 'A username, password, and an datacenter must be provided to ' - 'run these tests. Check ' - 'tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(PROVIDER_NAME) - ) + if username == 'foo' and password == 'bar': + self.skipTest('Conf items are missing that must be provided to run these tests: username, password' + '\nCheck tests/integration/files/conf/cloud.providers.d/{0}.conf'.format(self.PROVIDER)) def test_list_images(self): ''' Tests the return of running the --list-images command for ProfitBricks ''' - list_images = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME)) + list_images = self.run_cloud('--list-images {0}'.format(self.PROVIDER)) self.assertIn( 'Ubuntu-16.04-LTS-server-2017-10-01', [i.strip() for i in list_images] @@ -90,7 +54,7 @@ def test_list_image_alias(self): Tests the return of running the -f list_images command for ProfitBricks ''' - cmd = '-f list_images {0}'.format(PROVIDER_NAME) + cmd = '-f list_images {0}'.format(self.PROVIDER) list_images = self.run_cloud(cmd) self.assertIn( '- ubuntu:latest', @@ -101,7 +65,7 @@ def test_list_sizes(self): ''' Tests the return of running the --list_sizes command for ProfitBricks ''' - list_sizes = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME)) + list_sizes = self.run_cloud('--list-sizes {0}'.format(self.PROVIDER)) self.assertIn( 'Micro Instance:', [i.strip() for i in list_sizes] @@ -112,10 +76,10 @@ def test_list_datacenters(self): Tests the return of running the -f list_datacenters command for ProfitBricks ''' - cmd = '-f list_datacenters {0}'.format(PROVIDER_NAME) + cmd = '-f list_datacenters {0}'.format(self.PROVIDER) list_datacenters = self.run_cloud(cmd) self.assertIn( - self.datacenter_id, + self.provider_config['datacenter_id'], [i.strip() for i in list_datacenters] ) @@ -123,7 +87,7 @@ def test_list_nodes(self): ''' Tests the return of running the -f list_nodes command for ProfitBricks ''' - list_nodes = self.run_cloud('-f list_nodes {0}'.format(PROVIDER_NAME)) + list_nodes = self.run_cloud('-f list_nodes {0}'.format(self.PROVIDER)) self.assertIn( 'state:', [i.strip() for i in list_nodes] @@ -139,7 +103,7 @@ def test_list_nodes_full(self): Tests the return of running the -f list_nodes_full command for ProfitBricks ''' - cmd = '-f list_nodes_full {0}'.format(PROVIDER_NAME) + cmd = '-f list_nodes_full {0}'.format(self.PROVIDER) list_nodes = self.run_cloud(cmd) self.assertIn( 'state:', @@ -156,7 +120,7 @@ def test_list_location(self): Tests the return of running the --list-locations command for ProfitBricks ''' - cmd = '--list-locations {0}'.format(PROVIDER_NAME) + cmd = '--list-locations {0}'.format(self.PROVIDER) list_locations = self.run_cloud(cmd) self.assertIn( @@ -184,42 +148,7 @@ def test_instance(self): Test creating an instance on ProfitBricks ''' # check if 
instance with salt installed returned - try: - self.assertIn( - INSTANCE_NAME, - [i.strip() for i in self.run_cloud( - '-p profitbricks-test {0}'.format(INSTANCE_NAME), - timeout=500 - )] - ) - except AssertionError: - self.run_cloud( - '-d {0} --assume-yes'.format(INSTANCE_NAME), - timeout=500 - ) - raise - - # delete the instance - try: - self.assertIn( - INSTANCE_NAME + ':', - [i.strip() for i in self.run_cloud( - '-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500 - )] - ) - except AssertionError: - raise - - def tearDown(self): - ''' - Clean up after tests - ''' - query = self.run_cloud('--query') - ret = ' {0}:'.format(INSTANCE_NAME) - - # if test instance is still present, delete it - if ret in query: - self.run_cloud( - '-d {0} --assume-yes'.format(INSTANCE_NAME), - timeout=500 - ) + ret_str = self.run_cloud('-p profitbricks-test {0}'.format(self.instance_name), timeout=TIMEOUT) + self.assertInstanceExists(ret_str) + + self.assertDestroyInstance() diff --git a/tests/integration/cloud/clouds/test_virtualbox.py b/tests/integration/cloud/clouds/test_virtualbox.py index 1cd7c5966fdc..c0f12269fbc7 100644 --- a/tests/integration/cloud/clouds/test_virtualbox.py +++ b/tests/integration/cloud/clouds/test_virtualbox.py @@ -9,8 +9,8 @@ import socket # Import Salt Testing Libs +import tests.integration as integration from tests.support.unit import TestCase, skipIf -from tests.support.runtests import RUNTIME_VARS from tests.integration.cloud.helpers.virtualbox import (VirtualboxTestCase, VirtualboxCloudTestCase, CONFIG_NAME, @@ -87,7 +87,7 @@ def setUp(self): # check if personal access token, ssh_key_file, and ssh_key_names are present config_path = os.path.join( - RUNTIME_VARS.FILES, + integration.FILES, 'conf', 'cloud.providers.d', PROVIDER_NAME + '.conf' @@ -96,7 +96,7 @@ def setUp(self): providers = cloud_providers_config(config_path) log.debug("config: %s", providers) config_path = os.path.join( - RUNTIME_VARS.FILES, + integration.FILES, 'conf', 'cloud.profiles.d', PROVIDER_NAME + '.conf' @@ -251,7 +251,7 @@ def setUp(self): # check if personal access token, ssh_key_file, and ssh_key_names are present config_path = os.path.join( - RUNTIME_VARS.FILES, + integration.FILES, 'conf', 'cloud.providers.d', PROVIDER_NAME + '.conf' @@ -260,7 +260,7 @@ def setUp(self): providers = cloud_providers_config(config_path) log.debug("config: %s", providers) config_path = os.path.join( - RUNTIME_VARS.FILES, + integration.FILES, 'conf', 'cloud.profiles.d', PROVIDER_NAME + '.conf' diff --git a/tests/integration/cloud/clouds/test_vmware.py b/tests/integration/cloud/clouds/test_vmware.py index 67cf612cb567..0f14f2bb4de6 100644 --- a/tests/integration/cloud/clouds/test_vmware.py +++ b/tests/integration/cloud/clouds/test_vmware.py @@ -5,149 +5,55 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals -import os # Import Salt Libs -from salt.config import cloud_providers_config, cloud_config from salt.ext import six -# Import Salt Testing LIbs -from tests.support.case import ShellCase -from tests.support.runtests import RUNTIME_VARS -from tests.support.helpers import expensiveTest, generate_random_name -from tests.support.unit import skipIf, WAR_ROOM_SKIP - - # Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'vmware' -TIMEOUT = 500 +from tests.integration.cloud.helpers.cloud_test_base import TIMEOUT, CloudTest -@skipIf(WAR_ROOM_SKIP, 'WAR ROOM TEMPORARY SKIP') -@expensiveTest -class 
VMWareTest(ShellCase): +class VMWareTest(CloudTest): ''' Integration tests for the vmware cloud provider in Salt-Cloud ''' - - def setUp(self): - ''' - Sets up the test requirements - ''' - - # check if appropriate cloud provider and profile files are present - profile_str = 'vmware-config' - providers = self.run_cloud('--list-providers') - - if profile_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf files ' - 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' - .format(PROVIDER_NAME) - ) - - # check if user, password, url and provider are present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - - user = config[profile_str][PROVIDER_NAME]['user'] - password = config[profile_str][PROVIDER_NAME]['password'] - url = config[profile_str][PROVIDER_NAME]['url'] - - conf_items = [user, password, url] - missing_conf_item = [] - - for item in conf_items: - if item == '': - missing_conf_item.append(item) - - if missing_conf_item: - self.skipTest( - 'A user, password, and url must be provided to run these tests.' - 'One or more of these elements is missing. Check' - 'tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(PROVIDER_NAME) - ) + PROVIDER = 'vmware' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('password', 'user', 'url') def test_instance(self): ''' Tests creating and deleting an instance on vmware and installing salt ''' # create the instance - profile = os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.profiles.d', - PROVIDER_NAME + '.conf' - ) - - profile_config = cloud_config(profile) - disk_datastore = profile_config['vmware-test']['devices']['disk']['Hard disk 2']['datastore'] + disk_datastore = self.config['vmware-test']['devices']['disk']['Hard disk 2']['datastore'] - instance = self.run_cloud('-p vmware-test {0}'.format(INSTANCE_NAME), timeout=TIMEOUT) - ret_str = '{0}:'.format(INSTANCE_NAME) - disk_datastore_str = ' [{0}] {1}/Hard disk 2-flat.vmdk'.format(disk_datastore, INSTANCE_NAME) + ret_val = self.run_cloud('-p vmware-test {0}'.format(self.instance_name), timeout=TIMEOUT) + disk_datastore_str = ' [{0}] {1}/Hard disk 2-flat.vmdk'.format(disk_datastore, + self.instance_name) # check if instance returned with salt installed - try: - self.assertIn(ret_str, instance) - self.assertIn(disk_datastore_str, instance, - msg='Hard Disk 2 did not use the Datastore {0} '.format(disk_datastore)) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) - raise + self.assertInstanceExists(ret_val) + self.assertIn(disk_datastore_str, ret_val, + msg='Hard Disk 2 did not use the Datastore {0} '.format(disk_datastore)) - # delete the instance - delete = self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) - ret_str = '{0}:\', \' True'.format(INSTANCE_NAME) - - # check if deletion was performed appropriately - self.assertIn(ret_str, six.text_type(delete)) + self.assertDestroyInstance() def test_snapshot(self): ''' Tests creating snapshot and creating vm with --no-deploy ''' # create the instance - instance = self.run_cloud('-p vmware-test {0} --no-deploy'.format(INSTANCE_NAME), - timeout=TIMEOUT) - ret_str = '{0}:'.format(INSTANCE_NAME) + ret_val = self.run_cloud('-p vmware-test {0} --no-deploy'.format(self.instance_name), + timeout=TIMEOUT) # check if instance returned with salt installed - try: - self.assertIn(ret_str, instance) - except AssertionError: - 
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) - raise + self.assertInstanceExists(ret_val) create_snapshot = self.run_cloud('-a create_snapshot {0} \ snapshot_name=\'Test Cloud\' \ - memdump=True -y'.format(INSTANCE_NAME), - timeout=TIMEOUT) + memdump=True -y'.format(self.instance_name), timeout=TIMEOUT) s_ret_str = 'Snapshot created successfully' self.assertIn(s_ret_str, six.text_type(create_snapshot)) - # delete the instance - delete = self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) - ret_str = '{0}:\', \' True'.format(INSTANCE_NAME) - - self.assertIn(ret_str, six.text_type(delete)) - - def tearDown(self): - ''' - Clean up after tests - ''' - query = self.run_cloud('--query') - ret_str = ' {0}:'.format(INSTANCE_NAME) - - # if test instance is still present, delete it - if ret_str in query: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) + self.assertDestroyInstance() diff --git a/tests/integration/cloud/clouds/test_vultrpy.py b/tests/integration/cloud/clouds/test_vultrpy.py index 089209b40c23..f0dc58e309e7 100644 --- a/tests/integration/cloud/clouds/test_vultrpy.py +++ b/tests/integration/cloud/clouds/test_vultrpy.py @@ -5,76 +5,25 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals -import os import time # Import Salt Testing Libs -from tests.support.case import ShellCase -from tests.support.runtests import RUNTIME_VARS -from tests.support.helpers import expensiveTest, generate_random_name +from tests.integration.cloud.helpers.cloud_test_base import CloudTest, TIMEOUT from tests.support.unit import skipIf -# Import Salt Libs -from salt.config import cloud_providers_config -from salt.ext import six - -# Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'vultr' -TIMEOUT = 500 - - -@expensiveTest -class VultrTest(ShellCase): +class VultrTest(CloudTest): ''' Integration tests for the Vultr cloud provider in Salt-Cloud ''' - - @expensiveTest - def setUp(self): - ''' - Sets up the test requirements - ''' - super(VultrTest, self).setUp() - - # check if appropriate cloud provider and profile files are present - profile_str = 'vultr-config' - providers = self.run_cloud('--list-providers') - if profile_str + ':' not in providers: - self.skipTest( - 'Configuration file for {0} was not found. Check {0}.conf files ' - 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' - .format(PROVIDER_NAME) - ) - - # check if api_key, ssh_key_file, and ssh_key_names are present - config = cloud_providers_config( - os.path.join( - RUNTIME_VARS.FILES, - 'conf', - 'cloud.providers.d', - PROVIDER_NAME + '.conf' - ) - ) - - api_key = config[profile_str][PROVIDER_NAME]['api_key'] - ssh_file = config[profile_str][PROVIDER_NAME]['ssh_key_file'] - ssh_name = config[profile_str][PROVIDER_NAME]['ssh_key_name'] - - if api_key == '' or ssh_file == '' or ssh_name == '': - self.skipTest( - 'An API key, an ssh key file, and an ssh key name ' - 'must be provided to run these tests. 
Check ' - 'tests/integration/files/conf/cloud.providers.d/{0}.conf' - .format(PROVIDER_NAME) - ) + PROVIDER = 'vultr' + REQUIRED_PROVIDER_CONFIG_ITEMS = ('api_key', 'ssh_key_file', 'ssh_key_name') def test_list_images(self): ''' Tests the return of running the --list-images command for Vultr ''' - image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME)) + image_list = self.run_cloud('--list-images {0}'.format(self.PROVIDER)) self.assertIn( 'Debian 8 x64 (jessie)', @@ -85,7 +34,7 @@ def test_list_locations(self): ''' Tests the return of running the --list-locations command for Vultr ''' - location_list = self.run_cloud('--list-locations {0}'.format(PROVIDER_NAME)) + location_list = self.run_cloud('--list-locations {0}'.format(self.PROVIDER)) self.assertIn( 'New Jersey', [i.strip() for i in location_list] @@ -95,51 +44,51 @@ def test_list_sizes(self): ''' Tests the return of running the --list-sizes command for Vultr ''' - size_list = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME)) + size_list = self.run_cloud('--list-sizes {0}'.format(self.PROVIDER)) self.assertIn( '32768 MB RAM,4x110 GB SSD,40.00 TB BW', [i.strip() for i in size_list] ) # Commented for now, Vultr driver does not yet support key management -# def test_key_management(self): -# ''' -# Test key management -# ''' -# pub = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example' -# finger_print = '3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa' -# -# _key = self.run_cloud('-f create_key {0} name="MyPubKey" public_key="{1}"'.format(PROVIDER_NAME, pub)) -# -# # Upload public key -# self.assertIn( -# finger_print, -# [i.strip() for i in _key] -# ) -# -# try: -# # List all keys -# list_keypairs = self.run_cloud('-f list_keypairs {0}'.format(PROVIDER_NAME)) -# -# self.assertIn( -# finger_print, -# [i.strip() for i in list_keypairs] -# ) -# -# # List key -# show_keypair = self.run_cloud('-f show_keypair {0} keyname={1}'.format(PROVIDER_NAME, 'MyPubKey')) -# -# self.assertIn( -# finger_print, -# [i.strip() for i in show_keypair] -# ) -# except AssertionError: -# # Delete the public key if the above assertions fail -# self.run_cloud('-f remove_key {0} id={1}'.format(PROVIDER_NAME, finger_print)) -# raise -# -# # Delete public key -# self.assertTrue(self.run_cloud('-f remove_key {0} id={1}'.format(PROVIDER_NAME, finger_print))) + # def test_key_management(self): + # ''' + # Test key management + # ''' + # pub = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example' + # finger_print = '3b:16:bf:e4:8b:00:8b:b8:59:8c:a9:d3:f0:19:45:fa' + # + # _key = self.run_cloud('-f create_key {0} name="MyPubKey" public_key="{1}"'.format(self.PROVIDER, pub)) + # + # # Upload public key + # self.assertIn( + # finger_print, + # [i.strip() for i in _key] + # ) + # + # try: + # # List all keys + # list_keypairs = self.run_cloud('-f list_keypairs {0}'.format(self.PROVIDER)) + # + # self.assertIn( + # finger_print, + # [i.strip() for i in list_keypairs] + # ) + # + # # List key + # show_keypair = self.run_cloud('-f show_keypair {0} keyname={1}'.format(self.PROVIDER, 'MyPubKey')) + # + # self.assertIn( + # finger_print, + # [i.strip() for i in show_keypair] + # ) + # except AssertionError: + # # Delete the public key if the above assertions fail + # self.run_cloud('-f remove_key {0} id={1}'.format(self.PROVIDER, finger_print)) + # raise + # + # # Delete public key + # 
self.assertTrue(self.run_cloud('-f remove_key {0} id={1}'.format(self.PROVIDER, finger_print))) @skipIf(True, 'Skipped temporarily') def test_instance(self): @@ -147,36 +96,9 @@ Test creating an instance on Vultr ''' # check if instance with salt installed returned - try: - create_vm = self.run_cloud('-p vultr-test {0}'.format(INSTANCE_NAME), timeout=800) - self.assertIn( - INSTANCE_NAME, - [i.strip() for i in create_vm] - ) - self.assertNotIn('Failed to start', six.text_type(create_vm)) - except AssertionError: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) - raise + ret_val = self.run_cloud('-p vultr-test {0}'.format(self.instance_name), timeout=TIMEOUT + 300) + self.assertInstanceExists(ret_val) # Vultr won't let us delete an instance less than 5 minutes old. - time.sleep(420) - # delete the instance - results = self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) - try: - self.assertIn( - 'True', - [i.strip() for i in results] - ) - except AssertionError: - raise - - # Final clean-up of created instance, in case something went wrong. - # This was originally in a tearDown function, but that didn't make sense - # To run this for each test when not all tests create instances. - # Also, Vultr won't let instances be deleted unless they have been alive for 5 minutes. - # If we exceed 6 minutes and the instance is still there, quit - ct = 0 - while ct < 12 and INSTANCE_NAME in [i.strip() for i in self.run_cloud('--query')]: - self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=TIMEOUT) - time.sleep(30) - ct = ct + 1 + time.sleep(300) + self.assertDestroyInstance() diff --git a/tests/integration/cloud/helpers/cloud_test_base.py b/tests/integration/cloud/helpers/cloud_test_base.py new file mode 100644 index 000000000000..4cab4fc49b2e --- /dev/null +++ b/tests/integration/cloud/helpers/cloud_test_base.py @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- +''' +Base class and helpers for the cloud provider integration tests +''' + +# Import python libs +from __future__ import absolute_import, print_function, unicode_literals +from time import sleep +import logging +import os +import shutil + +# Import Salt Testing libs +from tests.support.case import ShellCase +from tests.support.helpers import generate_random_name, expensiveTest +from tests.support.paths import FILES +from tests.support.runtests import RUNTIME_VARS + +# Import Salt Libs +from salt.config import cloud_config, cloud_providers_config +from salt.ext.six.moves import range +from salt.utils.yaml import safe_load + +TIMEOUT = 500 + +log = logging.getLogger(__name__) + + +class CloudTest(ShellCase): + PROVIDER = '' + REQUIRED_PROVIDER_CONFIG_ITEMS = tuple() + TMP_PROVIDER_DIR = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'cloud.providers.d') + __RE_RUN_DELAY = 30 + __RE_TRIES = 12 + + @staticmethod + def clean_cloud_dir(tmp_dir): + ''' + Clean the cloud.providers.d tmp directory + ''' + if not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + return + + # make sure old provider configs are deleted + for i in os.listdir(tmp_dir): + os.remove(os.path.join(tmp_dir, i)) + + def query_instances(self): + ''' + Standardize the data returned from a salt-cloud --query + ''' + return set(x.strip(': ') for x in self.run_cloud('--query') if x.lstrip().lower().startswith('cloud-test-')) + + def _instance_exists(self, instance_name=None, query=None): + ''' + :param instance_name: The name of the instance to check for in salt-cloud. 
+ For example this may be used when a test temporarily renames an instance + :param query: The result of a salt-cloud --query run outside of this function + ''' + if not instance_name: + instance_name = self.instance_name + if not query: + query = self.query_instances() + + log.debug('Checking for "{}" in {}'.format(instance_name, query)) + if isinstance(query, set): + return instance_name in query + return any(instance_name == q.strip(': ') for q in query) + + def assertInstanceExists(self, creation_ret=None, instance_name=None): + ''' + :param instance_name: Override the checked instance name, otherwise the class default will be used. + :param creation_ret: The return value from the run_cloud() function that created the instance + ''' + if not instance_name: + instance_name = self.instance_name + + # If it exists but doesn't show up in the creation_ret, there was probably an error during creation + if creation_ret: + self.assertIn(instance_name, [i.strip(': ') for i in creation_ret], + 'An error occurred during instance creation: |\n\t{}\n\t|'.format( + '\n\t'.join(creation_ret) + )) + else: + # Verify that the instance exists via query + query = self.query_instances() + for tries in range(self.__RE_TRIES): + if self._instance_exists(instance_name, query): + log.debug( + 'Instance "{}" reported after {} seconds'.format(instance_name, tries * self.__RE_RUN_DELAY)) + break + else: + sleep(self.__RE_RUN_DELAY) + query = self.query_instances() + + # Assert that the last query was successful + self.assertTrue(self._instance_exists(instance_name, query), + 'Instance "{}" was not created successfully: {}'.format(instance_name, + ', '.join(query))) + + log.debug('Instance exists and was created: "{}"'.format(instance_name)) + + def assertDestroyInstance(self, instance_name=None, timeout=None): + if timeout is None: + timeout = TIMEOUT + if not instance_name: + instance_name = self.instance_name + log.debug('Deleting instance "{}"'.format(instance_name)) + delete_str = self.run_cloud('-d {0} --assume-yes --out=yaml'.format(instance_name), timeout=timeout) + if delete_str: + delete = safe_load('\n'.join(delete_str)) + self.assertIn(self.profile_str, delete) + self.assertIn(self.PROVIDER, delete[self.profile_str]) + self.assertIn(instance_name, delete[self.profile_str][self.PROVIDER]) + + delete_status = delete[self.profile_str][self.PROVIDER][instance_name] + if isinstance(delete_status, str): + self.assertEqual(delete_status, 'True') + return + elif isinstance(delete_status, dict): + current_state = delete_status.get('currentState') + if current_state: + if current_state.get('ACTION'): + self.assertIn('.delete', current_state.get('ACTION')) + return + else: + self.assertEqual(current_state.get('name'), 'shutting-down') + return + # It's not clear from the delete string that deletion was successful, ask salt-cloud after a delay + query = self.query_instances() + # some instances take a while to report their destruction + for tries in range(6): + if self._instance_exists(query=query): + sleep(30) + log.debug('Instance "{}" still found in query after {} tries: {}' + .format(instance_name, tries, query)) + query = self.query_instances() + # The last query should have been successful + self.assertNotIn(instance_name, self.query_instances()) + + @property + def instance_name(self): + if not hasattr(self, '_instance_name'): + # Create the cloud instance name to be used throughout the tests + # Avoid str.strip('Test'), which strips characters, not the suffix + class_name = self.__class__.__name__ + subclass = class_name[:-4] if class_name.endswith('Test') else class_name + # Use the first three letters of the subclass, fill with 
'-' if too short + self._instance_name = generate_random_name('cloud-test-{:-<3}-'.format(subclass[:3])).lower() + return self._instance_name + + @property + def providers(self): + if not hasattr(self, '_providers'): + self._providers = self.run_cloud('--list-providers') + return self._providers + + @property + def provider_config(self): + if not hasattr(self, '_provider_config'): + self._provider_config = cloud_providers_config( + os.path.join( + self.config_dir, + 'cloud.providers.d', + self.PROVIDER + '.conf' + ) + ) + return self._provider_config[self.profile_str][self.PROVIDER] + + @property + def config(self): + if not hasattr(self, '_config'): + self._config = cloud_config( + os.path.join( + self.config_dir, + 'cloud.profiles.d', + self.PROVIDER + '.conf' + ) + ) + return self._config + + @property + def profile_str(self): + return self.PROVIDER + '-config' + + @expensiveTest + def setUp(self): + ''' + Sets up the test requirements. In child classes, define PROVIDER and REQUIRED_PROVIDER_CONFIG_ITEMS or this will fail + ''' + super(CloudTest, self).setUp() + + if not self.PROVIDER: + self.fail('A PROVIDER must be defined for this test') + + # check if appropriate cloud provider and profile files are present + if self.profile_str + ':' not in self.providers: + self.skipTest( + 'Configuration file for {0} was not found. Check {0}.conf files ' + 'in tests/integration/files/conf/cloud.*.d/ to run these tests.' + .format(self.PROVIDER) + ) + + missing_conf_item = [] + for att in self.REQUIRED_PROVIDER_CONFIG_ITEMS: + if not self.provider_config.get(att): + missing_conf_item.append(att) + + if missing_conf_item: + self.skipTest('Conf items are missing that must be provided to run these tests: {}' .format(', '.join(missing_conf_item)) + '\nCheck tests/integration/files/conf/cloud.providers.d/{0}.conf'.format(self.PROVIDER)) + + def _alt_names(self): + ''' + Check for instances created alongside this test's instance that weren't cleaned up + ''' + query = self.query_instances() + instances = set() + for q in query: + # Verify that this is a new name and not a shutting-down EC2 instance + if q.startswith(self.instance_name) and not q.split('-')[-1].startswith('DEL'): + instances.add(q) + log.debug('Adding "{}" to the set of instances that need to be deleted'.format(q)) + return instances + + def _ensure_deletion(self, instance_name=None): + ''' + Make sure that the instance absolutely gets deleted, but fail the test if it happens in the tearDown + :return: a (success, message) tuple; success is True only if the instance was cleaned up in the test itself + ''' + destroyed = False + if not instance_name: + instance_name = self.instance_name + + if self._instance_exists(instance_name): + for tries in range(3): + try: + self.assertDestroyInstance(instance_name) + return False, 'The instance "{}" was deleted during the tearDown, not the test.'.format( instance_name) + except AssertionError as e: + log.error('Failed to delete instance "{}". Tries: {}\n{}'.format(instance_name, tries, str(e))) + if not self._instance_exists(instance_name): + destroyed = True + break + else: + sleep(30) + + if not destroyed: + # Destroying instances in the tearDown is a contingency, not the way things should work by default. + return False, 'The instance "{}" was not deleted after multiple attempts'.format(instance_name) + + return True, 'The instance "{}" cleaned up properly after the test'.format(instance_name) + + def tearDown(self): + ''' + Clean up after tests. If the instance still exists for any reason, delete it. 
+ Instances should be destroyed before the tearDown; assertDestroyInstance() should be called exactly + once in a test for each instance created. This is a fail-safe, and something went wrong + if an instance had to be destroyed here instead. + ''' + success = True + fail_messages = [] + alt_names = self._alt_names() + for instance in alt_names: + alt_destroyed, alt_destroy_message = self._ensure_deletion(instance) + if not alt_destroyed: + success = False + fail_messages.append(alt_destroy_message) + log.error('Failed to destroy instance "{}": {}'.format(instance, alt_destroy_message)) + self.assertTrue(success, '\n'.join(fail_messages)) + self.assertFalse(alt_names, 'Cleanup should happen in the test, not the tearDown') + + @classmethod + def tearDownClass(cls): + cls.clean_cloud_dir(cls.TMP_PROVIDER_DIR) + + @classmethod + def setUpClass(cls): + # clean up before setup + cls.clean_cloud_dir(cls.TMP_PROVIDER_DIR) + + # add the provider config for only the cloud we are testing + provider_file = cls.PROVIDER + '.conf' + shutil.copyfile(os.path.join(FILES, 'conf', 'cloud.providers.d', provider_file), + os.path.join(cls.TMP_PROVIDER_DIR, provider_file))
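For reviewers, here is a minimal sketch of what a provider test looks like on top of the new CloudTest base class. This is illustration only, not part of the patch: the examplecloud provider, the examplecloud-test profile, and the api_key config item are hypothetical placeholders.

# Illustration only (not part of the patch): 'examplecloud' and its
# 'api_key' item are assumed names, not a real salt-cloud driver.
from tests.integration.cloud.helpers.cloud_test_base import CloudTest, TIMEOUT


class ExampleCloudTest(CloudTest):
    '''
    Shape of a provider test after this refactor
    '''
    PROVIDER = 'examplecloud'
    # setUp() skips the test when any of these keys are empty in
    # tests/integration/files/conf/cloud.providers.d/examplecloud.conf
    REQUIRED_PROVIDER_CONFIG_ITEMS = ('api_key',)

    def test_instance(self):
        # instance_name is generated once per test class by the CloudTest base
        ret_val = self.run_cloud('-p examplecloud-test {0}'.format(self.instance_name), timeout=TIMEOUT)
        self.assertInstanceExists(ret_val)
        # Tests destroy their own instances; tearDown() is only a fail-safe
        self.assertDestroyInstance()

Note that assertDestroyInstance() accepts two broad output shapes from 'salt-cloud -d ... --out=yaml'. Roughly, as parsed Python dicts (shapes inferred from the assertions above; names and values are assumptions):

# Parsed YAML shapes that assertDestroyInstance() accepts (assumed values):
parsed_simple = {'examplecloud-config': {'examplecloud': {
    'cloud-test-exa-ab12cd': 'True'}}}
parsed_ec2_style = {'examplecloud-config': {'examplecloud': {
    'cloud-test-exa-ab12cd': {'currentState': {'name': 'shutting-down'}}}}}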
diff --git a/tests/integration/cloud/helpers/virtualbox.py b/tests/integration/cloud/helpers/virtualbox.py index a19a41c7533d..a4a6694408ea 100644 --- a/tests/integration/cloud/helpers/virtualbox.py +++ b/tests/integration/cloud/helpers/virtualbox.py @@ -9,7 +9,7 @@ import tests.integration.cloud.helpers from tests.support.case import ShellCase from tests.support.unit import TestCase, skipIf -from tests.support.runtests import RUNTIME_VARS +from tests.support.paths import FILES # Import Salt libs from salt.ext import six @@ -57,7 +57,7 @@ def run_cloud(self, arg_str, catch_stderr=False, timeout=None): @return: @rtype: dict """ - config_path = os.path.join(RUNTIME_VARS.FILES, 'conf') + config_path = os.path.join(FILES, 'conf') arg_str = '--out=json -c {0} {1}'.format(config_path, arg_str) # arg_str = "{0} --log-level=error".format(arg_str) log.debug("running salt-cloud with %s", arg_str) @@ -70,7 +70,7 @@ def run_cloud(self, arg_str, catch_stderr=False, timeout=None): # Attempt to clean json output before fix of https://github.com/saltstack/salt/issues/27629 valid_initial_chars = ['{', '[', '"'] for line in output[:]: - if not line or (line[0] not in valid_initial_chars): + if len(line) == 0 or (line[0] not in valid_initial_chars): output.pop(0) else: break diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index 9b2300b99eed..aacaf5ece857 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -5,55 +5,35 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import os -import random -import string # Import Salt Testing libs -from tests.support.case import ShellCase +from tests.integration.cloud.helpers.cloud_test_base import CloudTest from tests.support.helpers import expensiveTest from tests.support.runtests import RUNTIME_VARS # Import Salt libs import salt.cloud -from salt.ext.six.moves import range -def __random_name(size=6): - ''' - Generates a random cloud instance name - ''' - return 'CLOUD-TEST-' + ''.join( - random.choice(string.ascii_uppercase + string.digits) - for x in range(size) - ) - - -# Create the cloud instance name to be used throughout the tests -INSTANCE_NAME = __random_name() - - -class CloudClientTestCase(ShellCase): +class CloudClientTestCase(CloudTest): ''' Integration tests for the CloudClient class. Uses DigitalOcean as a salt-cloud provider. ''' + PROVIDER = 'digitalocean' + REQUIRED_PROVIDER_CONFIG_ITEMS = tuple() + IMAGE_NAME = '14.04.5 x64' @expensiveTest def setUp(self): self.config_file = os.path.join(RUNTIME_VARS.TMP_CONF_CLOUD_PROVIDER_INCLUDES, 'digitalocean.conf') - self.provider_name = 'digitalocean-config' - self.image_name = '14.04.5 x64' # Use a --list-images salt-cloud call to see if the DigitalOcean provider is # configured correctly before running any tests. - images = self.run_cloud('--list-images {0}'.format(self.provider_name)) + images = self.run_cloud('--list-images {0}'.format(self.PROVIDER)) - if self.image_name not in [i.strip() for i in images]: + if self.IMAGE_NAME not in [i.strip() for i in images]: self.skipTest( 'Image \'{0}\' was not found in image search. Is the {1} provider ' 'configured correctly for this test?'.format( - self.provider_name, - self.image_name + self.IMAGE_NAME, + self.PROVIDER ) ) @@ -71,20 +51,18 @@ def test_cloud_client_create_and_delete(self): cloud_client = salt.cloud.CloudClient(self.config_file) # Create the VM using salt.cloud.CloudClient.create() instead of calling salt-cloud - created = cloud_client.create( - provider=self.provider_name, - names=[INSTANCE_NAME], - image=self.image_name, - location='sfo1', - size='512mb', - vm_size='512mb' + ret_val = cloud_client.create( + provider=self.PROVIDER, + names=[self.instance_name], + image=self.IMAGE_NAME, + location='sfo1', size='512mb', vm_size='512mb' ) # Check that the VM was created correctly - self.assertIn(INSTANCE_NAME, created) + self.assertInstanceExists(ret_val) # Clean up after ourselves and delete the VM - deleted = cloud_client.destroy(names=[INSTANCE_NAME]) + deleted = cloud_client.destroy(names=[self.instance_name]) # Check that the VM was deleted correctly - self.assertIn(self.instance_name, deleted) diff --git a/tests/integration/files/conf/cloud.profiles.d/azure.conf b/tests/integration/files/conf/cloud.profiles.d/azure.conf index fe97e00b0857..bfc749cc9e51 100644 --- a/tests/integration/files/conf/cloud.profiles.d/azure.conf +++ b/tests/integration/files/conf/cloud.profiles.d/azure.conf @@ -1,10 +1,8 @@ azure-test: - provider: azure-config + provider: azurearm-config image: 'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB' - size: Medium - location: West US + size: Standard_D1 slot: production ssh_username: '' ssh_password: '' - media_link: '' script_args: '-P' diff --git a/tests/integration/files/conf/cloud.profiles.d/ec2.conf b/tests/integration/files/conf/cloud.profiles.d/ec2.conf index b04aeb015548..771b67857e2f 100644 --- a/tests/integration/files/conf/cloud.profiles.d/ec2.conf +++ b/tests/integration/files/conf/cloud.profiles.d/ec2.conf @@ -1,6 +1,6 @@ ec2-test: provider: ec2-config - image: ami-3ecc8f46 + image: '' size: c5.large - sh_username: centos + ssh_username: centos script_args: '-P' @@ -8,7 +8,7 @@ ec2-test: ec2-win2012r2-test: provider: ec2-config size: c5.large - image: ami-02e27664434db6def + image: '' smb_port: 445 win_installer: '' win_username: Administrator @@ -22,7 +22,7 @@ ec2-win2016-test: provider: ec2-config size: c5.large - image: ami-017bf00eb0d4c7182 + image: '' smb_port: 445 win_installer: '' win_username: Administrator diff --git a/tests/integration/files/conf/cloud.providers.d/azure.conf b/tests/integration/files/conf/cloud.providers.d/azure.conf deleted file mode 100644 index 40ce733e4b4d..000000000000 --- a/tests/integration/files/conf/cloud.providers.d/azure.conf +++ /dev/null @@ 
-1,7 +0,0 @@ -azure-config: - driver: azure - subscription_id: '' - certificate_path: '' - cleanup_disks: True - cleanup_vhds: True - cleanup_services: True diff --git a/tests/integration/files/conf/cloud.providers.d/azurearm.conf b/tests/integration/files/conf/cloud.providers.d/azurearm.conf new file mode 100644 index 000000000000..60c34667c1e0 --- /dev/null +++ b/tests/integration/files/conf/cloud.providers.d/azurearm.conf @@ -0,0 +1,16 @@ +azurearm-config: + driver: azurearm + subscription_id: '' + cleanup_disks: True + cleanup_interfaces: True + cleanup_vhds: True + cleanup_services: True + minion: + master_type: str + username: '' + password: '' + location: '' + network_resource_group: '' + network: '' + subnet: '' + resource_group: ''
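As a quick illustration of how the blank azurearm.conf above interacts with the new base class, the following standalone sketch mirrors the REQUIRED_PROVIDER_CONFIG_ITEMS guard in CloudTest.setUp(); the provider_config dict here is an assumed stand-in for what cloud_providers_config() returns for that file.

# Standalone sketch of the REQUIRED_PROVIDER_CONFIG_ITEMS guard; values assumed.
REQUIRED_PROVIDER_CONFIG_ITEMS = ('subscription_id',)
provider_config = {'subscription_id': '', 'cleanup_disks': True}

# Any required key that is empty or absent counts as missing
missing_conf_item = [item for item in REQUIRED_PROVIDER_CONFIG_ITEMS
                     if not provider_config.get(item)]
assert missing_conf_item == ['subscription_id']
# With anything missing, CloudTest.setUp() calls self.skipTest(...) so the
# suite never runs against a half-configured provider.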