From 77e63e2cca44534a7ad0777f0ef97d4280bb62ff Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 15 Jul 2022 07:24:14 +0200 Subject: [PATCH] Rewrite docker_container to use Docker API directly (#422) * Begin experiments for docker_container rewrite. * Continued. * We support API >= 1.25 only anyway. * Continued. * Fix bugs. * Complete first basic implementation. * Continuing. * Improvements and fixes. * Continuing. * More 'easy' options. * More options. * Work on volumes and mounts. * Add more options. * The last option. * Copy over. * Fix exposed ports. * Fix bugs. * Fix command and entrypoint. * More fixes. * Fix more bugs. * ci_complete * Lint, fix Python 2.7 bugs, work around ansible-test bug. ci_complete * Remove no longer applicable test. ci_complete * Remove unnecessary ignore. ci_complete * Start with engine driver. * Refactoring. * Avoid using anything Docker specific from self.client. * Refactor. * Add Python 2.6 ignore.txt entries for ansible-core < 2.12. * Improve healthcheck handling. * Fix container removal logic. * ci_complete * Remove handling of older Docker SDK for Python versions from integration tests. * Avoid recreation if a pure update is possible without losing the diff data. * Cover the case that blkio_weight does not work. * Update plugins/module_utils/module_container/docker_api.py Co-authored-by: Brian Scholer <1260690+briantist@users.noreply.github.com> * Improve memory_swap tests. * Fix URLs in changelog fragment. 
Co-authored-by: Brian Scholer <1260690+briantist@users.noreply.github.com> --- changelogs/fragments/docker_container.yml | 11 + plugins/module_utils/_api/api/client.py | 3 + plugins/module_utils/common.py | 5 +- plugins/module_utils/common_api.py | 3 +- plugins/module_utils/module_container/base.py | 1185 ++++++++ .../module_container/docker_api.py | 1327 +++++++++ .../module_utils/module_container/module.py | 803 ++++++ plugins/module_utils/util.py | 78 +- plugins/modules/docker_container.py | 2432 +---------------- .../targets/docker_container/tasks/main.yml | 5 +- .../tasks/tests/mounts-volumes.yml | 22 - .../docker_container/tasks/tests/network.yml | 1127 ++++---- .../docker_container/tasks/tests/options.yml | 160 +- .../docker_container/tasks/tests/update.yml | 172 ++ tests/sanity/ignore-2.10.txt | 4 +- tests/sanity/ignore-2.11.txt | 4 +- tests/sanity/ignore-2.12.txt | 1 - tests/sanity/ignore-2.13.txt | 1 - tests/sanity/ignore-2.14.txt | 1 - tests/sanity/ignore-2.9.txt | 4 +- .../plugins/modules/test_docker_container.py | 22 - 21 files changed, 4127 insertions(+), 3243 deletions(-) create mode 100644 changelogs/fragments/docker_container.yml create mode 100644 plugins/module_utils/module_container/base.py create mode 100644 plugins/module_utils/module_container/docker_api.py create mode 100644 plugins/module_utils/module_container/module.py create mode 100644 tests/integration/targets/docker_container/tasks/tests/update.yml delete mode 100644 tests/unit/plugins/modules/test_docker_container.py diff --git a/changelogs/fragments/docker_container.yml b/changelogs/fragments/docker_container.yml new file mode 100644 index 000000000..085f2b061 --- /dev/null +++ b/changelogs/fragments/docker_container.yml @@ -0,0 +1,11 @@ +major_changes: + - "docker_container - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, + and depending on the features used has some more requirements. 
If the Docker SDK for Python is installed, + these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/422)." + - "docker_container - the module was completely rewritten from scratch (https://github.com/ansible-collections/community.docker/pull/422)." +breaking_changes: + - "docker_container - ``publish_all_ports`` is no longer ignored in ``comparisons`` (https://github.com/ansible-collections/community.docker/pull/422)." + - "docker_container - ``exposed_ports`` is no longer ignored in ``comparisons``. Before, its value was assumed to be identical with the value of ``published_ports`` (https://github.com/ansible-collections/community.docker/pull/422)." + - "docker_container - ``log_options`` can no longer be specified when ``log_driver`` is not specified (https://github.com/ansible-collections/community.docker/pull/422)." + - "docker_container - ``restart_retries`` can no longer be specified when ``restart_policy`` is not specified (https://github.com/ansible-collections/community.docker/pull/422)." + - "docker_container - ``stop_timeout`` is no longer ignored for idempotency if told to be not ignored in ``comparisons``. So far it defaulted to ``ignore`` there, and setting it to ``strict`` had no effect (https://github.com/ansible-collections/community.docker/pull/422)." 
diff --git a/plugins/module_utils/_api/api/client.py b/plugins/module_utils/_api/api/client.py index 554393253..b930cb95f 100644 --- a/plugins/module_utils/_api/api/client.py +++ b/plugins/module_utils/_api/api/client.py @@ -545,6 +545,9 @@ def delete_call(self, pathfmt, *args, **kwargs): def delete_json(self, pathfmt, *args, **kwargs): return self._result(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True) + def post_call(self, pathfmt, *args, **kwargs): + self._raise_for_status(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs)) + def post_json(self, pathfmt, *args, **kwargs): data = kwargs.pop('data', None) self._raise_for_status(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs)) diff --git a/plugins/module_utils/common.py b/plugins/module_utils/common.py index 3081941bd..e889baee8 100644 --- a/plugins/module_utils/common.py +++ b/plugins/module_utils/common.py @@ -557,8 +557,8 @@ def inspect_distribution(self, image, **kwargs): class AnsibleDockerClient(AnsibleDockerClientBase): def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None, - required_together=None, required_if=None, required_one_of=None, min_docker_version=None, - min_docker_api_version=None, option_minimal_versions=None, + required_together=None, required_if=None, required_one_of=None, required_by=None, + min_docker_version=None, min_docker_api_version=None, option_minimal_versions=None, option_minimal_versions_ignore_params=None, fail_results=None): # Modules can put information in here which will always be returned @@ -588,6 +588,7 @@ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclu required_together=required_together_params, required_if=required_if, required_one_of=required_one_of, + required_by=required_by or {}, ) self.debug = self.module.params.get('debug') diff --git a/plugins/module_utils/common_api.py b/plugins/module_utils/common_api.py index 
da844d441..0ab2111f4 100644 --- a/plugins/module_utils/common_api.py +++ b/plugins/module_utils/common_api.py @@ -467,7 +467,7 @@ def pull_image(self, name, tag="latest", platform=None): class AnsibleDockerClient(AnsibleDockerClientBase): def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None, - required_together=None, required_if=None, required_one_of=None, + required_together=None, required_if=None, required_one_of=None, required_by=None, min_docker_api_version=None, option_minimal_versions=None, option_minimal_versions_ignore_params=None, fail_results=None): @@ -498,6 +498,7 @@ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclu required_together=required_together_params, required_if=required_if, required_one_of=required_one_of, + required_by=required_by or {}, ) self.debug = self.module.params.get('debug') diff --git a/plugins/module_utils/module_container/base.py b/plugins/module_utils/module_container/base.py new file mode 100644 index 000000000..97b84bf04 --- /dev/null +++ b/plugins/module_utils/module_container/base.py @@ -0,0 +1,1185 @@ +# Copyright (c) 2022 Felix Fontein +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import abc +import os +import re +import shlex + +from functools import partial + +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.formatters import human_to_bytes +from ansible.module_utils.six import string_types + +from ansible_collections.community.docker.plugins.module_utils.util import ( + clean_dict_booleans_for_docker_api, + normalize_healthcheck, + omit_none_from_dict, +) + +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( + parse_env_file, +) + + +_DEFAULT_IP_REPLACEMENT_STRING = 
'[[DEFAULT_IP:iewahhaeB4Sae6Aen8IeShairoh4zeph7xaekoh8Geingunaesaeweiy3ooleiwi]]' + + +_MOUNT_OPTION_TYPES = dict( + volume_driver='volume', + volume_options='volume', + propagation='bind', + no_copy='volume', + labels='volume', + tmpfs_size='tmpfs', + tmpfs_mode='tmpfs', +) + + +def _get_ansible_type(type): + if type == 'set': + return 'list' + if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'): + raise Exception('Invalid type "%s"' % (type, )) + return type + + +class Option(object): + def __init__( + self, + name, + type, + owner, + ansible_type=None, + elements=None, + ansible_elements=None, + ansible_suboptions=None, + ansible_aliases=None, + ansible_choices=None, + needs_no_suboptions=False, + default_comparison=None, + not_a_container_option=False, + not_an_ansible_option=False, + copy_comparison_from=None, + ): + self.name = name + self.type = type + self.ansible_type = ansible_type or _get_ansible_type(type) + needs_elements = self.type in ('list', 'set') + needs_ansible_elements = self.ansible_type in ('list', ) + if elements is not None and not needs_elements: + raise Exception('elements only allowed for lists/sets') + if elements is None and needs_elements: + raise Exception('elements required for lists/sets') + if ansible_elements is not None and not needs_ansible_elements: + raise Exception('Ansible elements only allowed for Ansible lists') + if (elements is None and ansible_elements is None) and needs_ansible_elements: + raise Exception('Ansible elements required for Ansible lists') + self.elements = elements if needs_elements else None + self.ansible_elements = (ansible_elements or _get_ansible_type(elements)) if needs_ansible_elements else None + needs_suboptions = (self.ansible_type == 'list' and self.ansible_elements == 'dict') or (self.ansible_type == 'dict') + if ansible_suboptions is not None and not needs_suboptions: + raise Exception('suboptions only allowed for Ansible lists with dicts, or Ansible dicts') + if ansible_suboptions 
is None and needs_suboptions and not needs_no_suboptions and not not_an_ansible_option: + raise Exception('suboptions required for Ansible lists with dicts, or Ansible dicts') + self.ansible_suboptions = ansible_suboptions if needs_suboptions else None + self.ansible_aliases = ansible_aliases or [] + self.ansible_choices = ansible_choices + comparison_type = self.type + if comparison_type == 'set' and self.elements == 'dict': + comparison_type = 'set(dict)' + elif comparison_type not in ('set', 'list', 'dict'): + comparison_type = 'value' + self.comparison_type = comparison_type + if default_comparison is not None: + self.comparison = default_comparison + elif comparison_type in ('list', 'value'): + self.comparison = 'strict' + else: + self.comparison = 'allow_more_present' + self.not_a_container_option = not_a_container_option + self.not_an_ansible_option = not_an_ansible_option + self.copy_comparison_from = copy_comparison_from + + +class OptionGroup(object): + def __init__( + self, + preprocess=None, + ansible_mutually_exclusive=None, + ansible_required_together=None, + ansible_required_one_of=None, + ansible_required_if=None, + ansible_required_by=None, + ): + if preprocess is None: + def preprocess(module, values): + return values + self.preprocess = preprocess + self.options = [] + self.engines = {} + self.ansible_mutually_exclusive = ansible_mutually_exclusive or [] + self.ansible_required_together = ansible_required_together or [] + self.ansible_required_one_of = ansible_required_one_of or [] + self.ansible_required_if = ansible_required_if or [] + self.ansible_required_by = ansible_required_by or {} + self.argument_spec = {} + + def add_option(self, *args, **kwargs): + option = Option(*args, owner=self, **kwargs) + if not option.not_a_container_option: + self.options.append(option) + if not option.not_an_ansible_option: + ansible_option = { + 'type': option.ansible_type, + } + if option.ansible_elements is not None: + ansible_option['elements'] = 
option.ansible_elements + if option.ansible_suboptions is not None: + ansible_option['options'] = option.ansible_suboptions + if option.ansible_aliases: + ansible_option['aliases'] = option.ansible_aliases + if option.ansible_choices is not None: + ansible_option['choices'] = option.ansible_choices + self.argument_spec[option.name] = ansible_option + return self + + def supports_engine(self, engine_name): + return engine_name in self.engines + + def get_engine(self, engine_name): + return self.engines[engine_name] + + def add_engine(self, engine_name, engine): + self.engines[engine_name] = engine + return self + + +class Engine(object): + min_api_version = None # string or None + min_api_version_obj = None # LooseVersion object or None + + @abc.abstractmethod + def get_value(self, module, container, api_version, options): + pass + + @abc.abstractmethod + def set_value(self, module, data, api_version, options, values): + pass + + @abc.abstractmethod + def get_expected_values(self, module, client, api_version, options, image, values): + pass + + @abc.abstractmethod + def ignore_mismatching_result(self, module, client, api_version, option, image, container_value, expected_value): + pass + + @abc.abstractmethod + def preprocess_value(self, module, client, api_version, options, values): + pass + + @abc.abstractmethod + def update_value(self, module, data, api_version, options, values): + pass + + @abc.abstractmethod + def can_set_value(self, api_version): + pass + + @abc.abstractmethod + def can_update_value(self, api_version): + pass + + +class EngineDriver(object): + name = None # string + + @abc.abstractmethod + def setup(self, argument_spec, mutually_exclusive=None, required_together=None, required_one_of=None, required_if=None, required_by=None): + # Return (module, active_options, client) + pass + + @abc.abstractmethod + def get_api_version(self, client): + pass + + @abc.abstractmethod + def get_container_id(self, container): + pass + + @abc.abstractmethod + def 
get_image_from_container(self, container): + pass + + @abc.abstractmethod + def is_container_removing(self, container): + pass + + @abc.abstractmethod + def is_container_running(self, container): + pass + + @abc.abstractmethod + def is_container_paused(self, container): + pass + + @abc.abstractmethod + def inspect_container_by_name(self, client, container_name): + pass + + @abc.abstractmethod + def inspect_container_by_id(self, client, container_id): + pass + + @abc.abstractmethod + def inspect_image_by_id(self, client, image_id): + pass + + @abc.abstractmethod + def inspect_image_by_name(self, client, repository, tag): + pass + + @abc.abstractmethod + def pull_image(self, client, repository, tag): + pass + + @abc.abstractmethod + def pause_container(self, client, container_id): + pass + + @abc.abstractmethod + def unpause_container(self, client, container_id): + pass + + @abc.abstractmethod + def disconnect_container_from_network(self, client, container_id, network_id): + pass + + @abc.abstractmethod + def connect_container_to_network(self, client, container_id, network_id, parameters=None): + pass + + @abc.abstractmethod + def create_container(self, client, container_name, create_parameters): + pass + + @abc.abstractmethod + def start_container(self, client, container_id): + pass + + @abc.abstractmethod + def wait_for_container(self, client, container_id): + pass + + @abc.abstractmethod + def get_container_output(self, client, container_id): + pass + + @abc.abstractmethod + def update_container(self, client, container_id, update_parameters): + pass + + @abc.abstractmethod + def restart_container(self, client, container_id, timeout=None): + pass + + @abc.abstractmethod + def kill_container(self, client, container_id, kill_signal=None): + pass + + @abc.abstractmethod + def stop_container(self, client, container_id, timeout=None): + pass + + @abc.abstractmethod + def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False): + pass 
+ + @abc.abstractmethod + def run(self, runner, client): + pass + + +def _is_volume_permissions(mode): + for part in mode.split(','): + if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'): + return False + return True + + +def _parse_port_range(range_or_port, module): + ''' + Parses a string containing either a single port or a range of ports. + + Returns a list of integers for each port in the list. + ''' + if '-' in range_or_port: + try: + start, end = [int(port) for port in range_or_port.split('-')] + except Exception: + module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port)) + if end < start: + module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port)) + return list(range(start, end + 1)) + else: + try: + return [int(range_or_port)] + except Exception: + module.fail_json(msg='Invalid port: "{0}"'.format(range_or_port)) + + +def _split_colon_ipv6(text, module): + ''' + Split string by ':', while keeping IPv6 addresses in square brackets in one component. 
+ ''' + if '[' not in text: + return text.split(':') + start = 0 + result = [] + while start < len(text): + i = text.find('[', start) + if i < 0: + result.extend(text[start:].split(':')) + break + j = text.find(']', i) + if j < 0: + module.fail_json(msg='Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) + result.extend(text[start:i].split(':')) + k = text.find(':', j) + if k < 0: + result[-1] += text[i:] + start = len(text) + else: + result[-1] += text[i:k] + if k == len(text): + result.append('') + break + start = k + 1 + return result + + +def _preprocess_command(module, values): + if 'command' not in values: + return values + value = values['command'] + if module.params['command_handling'] == 'correct': + if value is not None: + if not isinstance(value, list): + # convert from str to list + value = shlex.split(to_text(value, errors='surrogate_or_strict')) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + elif value: + # convert from list to str + if isinstance(value, list): + value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value])) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + else: + value = shlex.split(to_text(value, errors='surrogate_or_strict')) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + else: + return {} + return { + 'command': value, + } + + +def _preprocess_entrypoint(module, values): + if 'entrypoint' not in values: + return values + value = values['entrypoint'] + if module.params['command_handling'] == 'correct': + if value is not None: + value = [to_text(x, errors='surrogate_or_strict') for x in value] + elif value: + # convert from list to str. 
+ value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value])) + value = [to_text(x, errors='surrogate_or_strict') for x in value] + else: + return {} + return { + 'entrypoint': value, + } + + +def _preprocess_env(module, values): + if not values: + return {} + final_env = {} + if 'env_file' in values: + parsed_env_file = parse_env_file(values['env_file']) + for name, value in parsed_env_file.items(): + final_env[name] = to_text(value, errors='surrogate_or_strict') + if 'env' in values: + for name, value in values['env'].items(): + if not isinstance(value, string_types): + module.fail_json(msg='Non-string value found for env option. Ambiguous env options must be ' + 'wrapped in quotes to avoid them being interpreted. Key: %s' % (name, )) + final_env[name] = to_text(value, errors='surrogate_or_strict') + formatted_env = [] + for key, value in final_env.items(): + formatted_env.append('%s=%s' % (key, value)) + return { + 'env': formatted_env, + } + + +def _preprocess_healthcheck(module, values): + if not values: + return {} + return { + 'healthcheck': normalize_healthcheck(values['healthcheck'], normalize_test=False), + } + + +def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None): + if name not in values: + return values + try: + value = values[name] + if unlimited_value is not None and value in ('unlimited', str(unlimited_value)): + value = unlimited_value + else: + value = human_to_bytes(value) + values[name] = value + return values + except ValueError as exc: + module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) + + +def _preprocess_mac_address(module, values): + if 'mac_address' not in values: + return values + return { + 'mac_address': values['mac_address'].replace('-', ':'), + } + + +def _preprocess_networks(module, values): + if module.params['networks_cli_compatible'] is True and values.get('networks') and 'network_mode' not in values: + # Same behavior as Docker CLI: if 
networks are specified, use the name of the first network as the value for network_mode + # (assuming no explicit value is specified for network_mode) + values['network_mode'] = values['networks'][0]['name'] + + if 'networks' in values: + for network in values['networks']: + if network['links']: + parsed_links = [] + for link in network['links']: + parsed_link = link.split(':', 1) + if len(parsed_link) == 1: + parsed_link = (link, link) + parsed_links.append(tuple(parsed_link)) + network['links'] = parsed_links + + return values + + +def _preprocess_sysctls(module, values): + if 'sysctls' in values: + for key, value in values['sysctls'].items(): + values['sysctls'][key] = to_text(value, errors='surrogate_or_strict') + return values + + +def _preprocess_tmpfs(module, values): + if 'tmpfs' not in values: + return values + result = {} + for tmpfs_spec in values['tmpfs']: + split_spec = tmpfs_spec.split(":", 1) + if len(split_spec) > 1: + result[split_spec[0]] = split_spec[1] + else: + result[split_spec[0]] = "" + return { + 'tmpfs': result + } + + +def _preprocess_ulimits(module, values): + if 'ulimits' not in values: + return values + result = [] + for limit in values['ulimits']: + limits = dict() + pieces = limit.split(':') + if len(pieces) >= 2: + limits['Name'] = pieces[0] + limits['Soft'] = int(pieces[1]) + limits['Hard'] = int(pieces[1]) + if len(pieces) == 3: + limits['Hard'] = int(pieces[2]) + result.append(limits) + return { + 'ulimits': result, + } + + +def _preprocess_mounts(module, values): + last = dict() + + def check_collision(t, name): + if t in last: + if name == last[t]: + module.fail_json(msg='The mount point "{0}" appears twice in the {1} option'.format(t, name)) + else: + module.fail_json(msg='The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t])) + last[t] = name + + if 'mounts' in values: + mounts = [] + for mount in values['mounts']: + target = mount['target'] + mount_type = mount['type'] + + 
check_collision(target, 'mounts') + + mount_dict = dict(mount) + + # Sanity checks + if mount['source'] is None and mount_type not in ('tmpfs', 'volume'): + module.fail_json(msg='source must be specified for mount "{0}" of type "{1}"'.format(target, mount_type)) + for option, req_mount_type in _MOUNT_OPTION_TYPES.items(): + if mount[option] is not None and mount_type != req_mount_type: + module.fail_json( + msg='{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, mount_type, req_mount_type) + ) + + # Streamline options + volume_options = mount_dict.pop('volume_options') + if mount_dict['volume_driver'] and volume_options: + mount_dict['volume_options'] = clean_dict_booleans_for_docker_api(volume_options) + if mount_dict['labels']: + mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels']) + if mount_dict['tmpfs_size'] is not None: + try: + mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size']) + except ValueError as exc: + module.fail_json(msg='Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, to_native(exc))) + if mount_dict['tmpfs_mode'] is not None: + try: + mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8) + except Exception as dummy: + module.fail_json(msg='tmp_fs mode of mount "{0}" is not an octal string!'.format(target)) + + # Add result to list + mounts.append(omit_none_from_dict(mount_dict)) + values['mounts'] = mounts + if 'volumes' in values: + new_vols = [] + for vol in values['volumes']: + parts = vol.split(':') + if ':' in vol: + if len(parts) == 3: + host, container, mode = parts + if not _is_volume_permissions(mode): + module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) + if re.match(r'[.~]', host): + host = os.path.abspath(os.path.expanduser(host)) + check_collision(container, 'volumes') + new_vols.append("%s:%s:%s" % (host, container, mode)) + continue + elif len(parts) == 2: + if not _is_volume_permissions(parts[1]) 
and re.match(r'[.~]', parts[0]): + host = os.path.abspath(os.path.expanduser(parts[0])) + check_collision(parts[1], 'volumes') + new_vols.append("%s:%s:rw" % (host, parts[1])) + continue + check_collision(parts[min(1, len(parts) - 1)], 'volumes') + new_vols.append(vol) + values['volumes'] = new_vols + new_binds = [] + for vol in new_vols: + host = None + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + host, container, mode = parts + if not _is_volume_permissions(mode): + module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode)) + elif len(parts) == 2: + if not _is_volume_permissions(parts[1]): + host, container, mode = (parts + ['rw']) + if host is not None: + new_binds.append('%s:%s:%s' % (host, container, mode)) + values['volume_binds'] = new_binds + return values + + +def _preprocess_log(module, values): + result = {} + if 'log_driver' not in values: + return result + result['log_driver'] = values['log_driver'] + if 'log_options' in values: + options = {} + for k, v in values['log_options'].items(): + if not isinstance(v, string_types): + module.warn( + "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. " + "If this is not correct, or you want to avoid such warnings, please quote the value." % ( + k, to_text(v, errors='surrogate_or_strict')) + ) + v = to_text(v, errors='surrogate_or_strict') + options[k] = v + result['log_options'] = options + return result + + +def _preprocess_ports(module, values): + if 'published_ports' in values: + if 'all' in values['published_ports']: + module.fail_json( + msg='Specifying "all" in published_ports is no longer allowed. 
Set publish_all_ports to "true" instead ' + 'to randomly assign port mappings for those not specified by published_ports.') + + binds = {} + for port in values['published_ports']: + parts = _split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), module) + container_port = parts[-1] + protocol = '' + if '/' in container_port: + container_port, protocol = parts[-1].split('/') + container_ports = _parse_port_range(container_port, module) + + p_len = len(parts) + if p_len == 1: + port_binds = len(container_ports) * [(_DEFAULT_IP_REPLACEMENT_STRING, )] + elif p_len == 2: + if len(container_ports) == 1: + port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, parts[0])] + else: + port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, port) for port in _parse_port_range(parts[0], module)] + elif p_len == 3: + # We only allow IPv4 and IPv6 addresses for the bind address + ipaddr = parts[0] + if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr): + module.fail_json( + msg='Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. ' + 'Use the dig lookup to resolve hostnames. (Found hostname: {0})'.format(ipaddr) + ) + if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr): + ipaddr = ipaddr[1:-1] + if parts[1]: + if len(container_ports) == 1: + port_binds = [(ipaddr, parts[1])] + else: + port_binds = [(ipaddr, port) for port in _parse_port_range(parts[1], module)] + else: + port_binds = len(container_ports) * [(ipaddr,)] + else: + module.fail_json( + msg='Invalid port description "%s" - expected 1 to 3 colon-separated parts, but got %d. ' + 'Maybe you forgot to use square brackets ([...]) around an IPv6 address?' 
% (port, p_len) + ) + + for bind, container_port in zip(port_binds, container_ports): + idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port + if idx in binds: + old_bind = binds[idx] + if isinstance(old_bind, list): + old_bind.append(bind) + else: + binds[idx] = [old_bind, bind] + else: + binds[idx] = bind + values['published_ports'] = binds + + exposed = [] + if 'exposed_ports' in values: + for port in values['exposed_ports']: + port = to_text(port, errors='surrogate_or_strict').strip() + protocol = 'tcp' + match = re.search(r'(/.+$)', port) + if match: + protocol = match.group(1).replace('/', '') + port = re.sub(r'/.+$', '', port) + exposed.append((port, protocol)) + if 'published_ports' in values: + # Any published port should also be exposed + for publish_port in values['published_ports']: + match = False + if isinstance(publish_port, string_types) and '/' in publish_port: + port, protocol = publish_port.split('/') + port = int(port) + else: + protocol = 'tcp' + port = int(publish_port) + for exposed_port in exposed: + if exposed_port[1] != protocol: + continue + if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]: + start_port, end_port = exposed_port[0].split('-') + if int(start_port) <= port <= int(end_port): + match = True + elif exposed_port[0] == port: + match = True + if not match: + exposed.append((port, protocol)) + values['ports'] = exposed + return values + + +OPTION_AUTO_REMOVE = ( + OptionGroup() + .add_option('auto_remove', type='bool') +) + +OPTION_BLKIO_WEIGHT = ( + OptionGroup() + .add_option('blkio_weight', type='int') +) + +OPTION_CAPABILITIES = ( + OptionGroup() + .add_option('capabilities', type='set', elements='str') +) + +OPTION_CAP_DROP = ( + OptionGroup() + .add_option('cap_drop', type='set', elements='str') +) + +OPTION_CGROUP_PARENT = ( + OptionGroup() + .add_option('cgroup_parent', type='str') +) + +OPTION_COMMAND = ( + OptionGroup(preprocess=_preprocess_command) + 
.add_option('command', type='list', elements='str', ansible_type='raw') +) + +OPTION_CPU_PERIOD = ( + OptionGroup() + .add_option('cpu_period', type='int') +) + +OPTION_CPU_QUOTA = ( + OptionGroup() + .add_option('cpu_quota', type='int') +) + +OPTION_CPUSET_CPUS = ( + OptionGroup() + .add_option('cpuset_cpus', type='str') +) + +OPTION_CPUSET_MEMS = ( + OptionGroup() + .add_option('cpuset_mems', type='str') +) + +OPTION_CPU_SHARES = ( + OptionGroup() + .add_option('cpu_shares', type='int') +) + +OPTION_ENTRYPOINT = ( + OptionGroup(preprocess=_preprocess_entrypoint) + .add_option('entrypoint', type='list', elements='str') +) + +OPTION_CPUS = ( + OptionGroup() + .add_option('cpus', type='int', ansible_type='float') +) + +OPTION_DETACH_INTERACTIVE = ( + OptionGroup() + .add_option('detach', type='bool') + .add_option('interactive', type='bool') +) + +OPTION_DEVICES = ( + OptionGroup() + .add_option('devices', type='set', elements='dict', ansible_elements='str') +) + +OPTION_DEVICE_READ_BPS = ( + OptionGroup() + .add_option('device_read_bps', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='str'), + )) +) + +OPTION_DEVICE_WRITE_BPS = ( + OptionGroup() + .add_option('device_write_bps', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='str'), + )) +) + +OPTION_DEVICE_READ_IOPS = ( + OptionGroup() + .add_option('device_read_iops', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='int'), + )) +) + +OPTION_DEVICE_WRITE_IOPS = ( + OptionGroup() + .add_option('device_write_iops', type='set', elements='dict', ansible_suboptions=dict( + path=dict(required=True, type='str'), + rate=dict(required=True, type='int'), + )) +) + +OPTION_DEVICE_REQUESTS = ( + OptionGroup() + .add_option('device_requests', type='set', elements='dict', ansible_suboptions=dict( 
+ capabilities=dict(type='list', elements='list'), + count=dict(type='int'), + device_ids=dict(type='list', elements='str'), + driver=dict(type='str'), + options=dict(type='dict'), + )) +) + +OPTION_DNS_SERVERS = ( + OptionGroup() + .add_option('dns_servers', type='list', elements='str') +) + +OPTION_DNS_OPTS = ( + OptionGroup() + .add_option('dns_opts', type='set', elements='str') +) + +OPTION_DNS_SEARCH_DOMAINS = ( + OptionGroup() + .add_option('dns_search_domains', type='list', elements='str') +) + +OPTION_DOMAINNAME = ( + OptionGroup() + .add_option('domainname', type='str') +) + +OPTION_ENVIRONMENT = ( + OptionGroup(preprocess=_preprocess_env) + .add_option('env', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True) + .add_option('env_file', type='set', ansible_type='path', elements='str', not_a_container_option=True) +) + +OPTION_ETC_HOSTS = ( + OptionGroup() + .add_option('etc_hosts', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True) +) + +OPTION_GROUPS = ( + OptionGroup() + .add_option('groups', type='set', elements='str') +) + +OPTION_HEALTHCHECK = ( + OptionGroup(preprocess=_preprocess_healthcheck) + .add_option('healthcheck', type='dict', ansible_suboptions=dict( + test=dict(type='raw'), + interval=dict(type='str'), + timeout=dict(type='str'), + start_period=dict(type='str'), + retries=dict(type='int'), + )) +) + +OPTION_HOSTNAME = ( + OptionGroup() + .add_option('hostname', type='str') +) + +OPTION_IMAGE = ( + OptionGroup(preprocess=_preprocess_networks) + .add_option('image', type='str') +) + +OPTION_INIT = ( + OptionGroup() + .add_option('init', type='bool') +) + +OPTION_IPC_MODE = ( + OptionGroup() + .add_option('ipc_mode', type='str') +) + +OPTION_KERNEL_MEMORY = ( + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='kernel_memory')) + .add_option('kernel_memory', type='int', ansible_type='str') +) + +OPTION_LABELS = ( + OptionGroup() + .add_option('labels', type='dict', 
needs_no_suboptions=True) +) + +OPTION_LINKS = ( + OptionGroup() + .add_option('links', type='set', elements='list', ansible_elements='str') +) + +OPTION_LOG_DRIVER_OPTIONS = ( + OptionGroup(preprocess=_preprocess_log, ansible_required_by={'log_options': ['log_driver']}) + .add_option('log_driver', type='str') + .add_option('log_options', type='dict', ansible_aliases=['log_opt'], needs_no_suboptions=True) +) + +OPTION_MAC_ADDRESS = ( + OptionGroup(preprocess=_preprocess_mac_address) + .add_option('mac_address', type='str') +) + +OPTION_MEMORY = ( + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory')) + .add_option('memory', type='int', ansible_type='str') +) + +OPTION_MEMORY_RESERVATION = ( + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_reservation')) + .add_option('memory_reservation', type='int', ansible_type='str') +) + +OPTION_MEMORY_SWAP = ( + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_swap', unlimited_value=-1)) + .add_option('memory_swap', type='int', ansible_type='str') +) + +OPTION_MEMORY_SWAPPINESS = ( + OptionGroup() + .add_option('memory_swappiness', type='int') +) + +OPTION_STOP_TIMEOUT = ( + OptionGroup() + .add_option('stop_timeout', type='int', default_comparison='ignore') +) + +OPTION_NETWORK = ( + OptionGroup(preprocess=_preprocess_networks) + .add_option('network_mode', type='str') + .add_option('networks', type='set', elements='dict', ansible_suboptions=dict( + name=dict(type='str', required=True), + ipv4_address=dict(type='str'), + ipv6_address=dict(type='str'), + aliases=dict(type='list', elements='str'), + links=dict(type='list', elements='str'), + )) +) + +OPTION_OOM_KILLER = ( + OptionGroup() + .add_option('oom_killer', type='bool') +) + +OPTION_OOM_SCORE_ADJ = ( + OptionGroup() + .add_option('oom_score_adj', type='int') +) + +OPTION_PID_MODE = ( + OptionGroup() + .add_option('pid_mode', type='str') +) + +OPTION_PIDS_LIMIT = ( + OptionGroup() + 
.add_option('pids_limit', type='int') +) + +OPTION_PRIVILEGED = ( + OptionGroup() + .add_option('privileged', type='bool') +) + +OPTION_READ_ONLY = ( + OptionGroup() + .add_option('read_only', type='bool') +) + +OPTION_RESTART_POLICY = ( + OptionGroup(ansible_required_by={'restart_retries': ['restart_policy']}) + .add_option('restart_policy', type='str', ansible_choices=['no', 'on-failure', 'always', 'unless-stopped']) + .add_option('restart_retries', type='int') +) + +OPTION_RUNTIME = ( + OptionGroup() + .add_option('runtime', type='str') +) + +OPTION_SECURITY_OPTS = ( + OptionGroup() + .add_option('security_opts', type='set', elements='str') +) + +OPTION_SHM_SIZE = ( + OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='shm_size')) + .add_option('shm_size', type='int', ansible_type='str') +) + +OPTION_STOP_SIGNAL = ( + OptionGroup() + .add_option('stop_signal', type='str') +) + +OPTION_STORAGE_OPTS = ( + OptionGroup() + .add_option('storage_opts', type='dict', needs_no_suboptions=True) +) + +OPTION_SYSCTLS = ( + OptionGroup(preprocess=_preprocess_sysctls) + .add_option('sysctls', type='dict', needs_no_suboptions=True) +) + +OPTION_TMPFS = ( + OptionGroup(preprocess=_preprocess_tmpfs) + .add_option('tmpfs', type='dict', ansible_type='list', ansible_elements='str') +) + +OPTION_TTY = ( + OptionGroup() + .add_option('tty', type='bool') +) + +OPTION_ULIMITS = ( + OptionGroup(preprocess=_preprocess_ulimits) + .add_option('ulimits', type='set', elements='dict', ansible_elements='str') +) + +OPTION_USER = ( + OptionGroup() + .add_option('user', type='str') +) + +OPTION_USERNS_MODE = ( + OptionGroup() + .add_option('userns_mode', type='str') +) + +OPTION_UTS = ( + OptionGroup() + .add_option('uts', type='str') +) + +OPTION_VOLUME_DRIVER = ( + OptionGroup() + .add_option('volume_driver', type='str') +) + +OPTION_VOLUMES_FROM = ( + OptionGroup() + .add_option('volumes_from', type='set', elements='str') +) + +OPTION_WORKING_DIR = ( + OptionGroup() + 
.add_option('working_dir', type='str') +) + +OPTION_MOUNTS_VOLUMES = ( + OptionGroup(preprocess=_preprocess_mounts) + .add_option('mounts', type='set', elements='dict', ansible_suboptions=dict( + target=dict(type='str', required=True), + source=dict(type='str'), + type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), + read_only=dict(type='bool'), + consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), + propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), + no_copy=dict(type='bool'), + labels=dict(type='dict'), + volume_driver=dict(type='str'), + volume_options=dict(type='dict'), + tmpfs_size=dict(type='str'), + tmpfs_mode=dict(type='str'), + )) + .add_option('volumes', type='set', elements='str') + .add_option('volume_binds', type='set', elements='str', not_an_ansible_option=True, copy_comparison_from='volumes') +) + +OPTION_PORTS = ( + OptionGroup(preprocess=_preprocess_ports) + .add_option('exposed_ports', type='set', elements='str', ansible_aliases=['exposed', 'expose']) + .add_option('publish_all_ports', type='bool') + .add_option('published_ports', type='dict', ansible_type='list', ansible_elements='str', ansible_aliases=['ports']) + .add_option('ports', type='set', elements='str', not_an_ansible_option=True, default_comparison='ignore') +) + +OPTIONS = [ + OPTION_AUTO_REMOVE, + OPTION_BLKIO_WEIGHT, + OPTION_CAPABILITIES, + OPTION_CAP_DROP, + OPTION_CGROUP_PARENT, + OPTION_COMMAND, + OPTION_CPU_PERIOD, + OPTION_CPU_QUOTA, + OPTION_CPUSET_CPUS, + OPTION_CPUSET_MEMS, + OPTION_CPU_SHARES, + OPTION_ENTRYPOINT, + OPTION_CPUS, + OPTION_DETACH_INTERACTIVE, + OPTION_DEVICES, + OPTION_DEVICE_READ_BPS, + OPTION_DEVICE_WRITE_BPS, + OPTION_DEVICE_READ_IOPS, + OPTION_DEVICE_WRITE_IOPS, + OPTION_DEVICE_REQUESTS, + OPTION_DNS_SERVERS, + OPTION_DNS_OPTS, + OPTION_DNS_SEARCH_DOMAINS, + OPTION_DOMAINNAME, + OPTION_ENVIRONMENT, + OPTION_ETC_HOSTS, + 
OPTION_GROUPS, + OPTION_HEALTHCHECK, + OPTION_HOSTNAME, + OPTION_IMAGE, + OPTION_INIT, + OPTION_IPC_MODE, + OPTION_KERNEL_MEMORY, + OPTION_LABELS, + OPTION_LINKS, + OPTION_LOG_DRIVER_OPTIONS, + OPTION_MAC_ADDRESS, + OPTION_MEMORY, + OPTION_MEMORY_RESERVATION, + OPTION_MEMORY_SWAP, + OPTION_MEMORY_SWAPPINESS, + OPTION_STOP_TIMEOUT, + OPTION_NETWORK, + OPTION_OOM_KILLER, + OPTION_OOM_SCORE_ADJ, + OPTION_PID_MODE, + OPTION_PIDS_LIMIT, + OPTION_PRIVILEGED, + OPTION_READ_ONLY, + OPTION_RESTART_POLICY, + OPTION_RUNTIME, + OPTION_SECURITY_OPTS, + OPTION_SHM_SIZE, + OPTION_STOP_SIGNAL, + OPTION_STORAGE_OPTS, + OPTION_SYSCTLS, + OPTION_TMPFS, + OPTION_TTY, + OPTION_ULIMITS, + OPTION_USER, + OPTION_USERNS_MODE, + OPTION_UTS, + OPTION_VOLUME_DRIVER, + OPTION_VOLUMES_FROM, + OPTION_WORKING_DIR, + OPTION_MOUNTS_VOLUMES, + OPTION_PORTS, +] diff --git a/plugins/module_utils/module_container/docker_api.py b/plugins/module_utils/module_container/docker_api.py new file mode 100644 index 000000000..c4f213b12 --- /dev/null +++ b/plugins/module_utils/module_container/docker_api.py @@ -0,0 +1,1327 @@ +# Copyright (c) 2022 Felix Fontein +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json +import traceback + +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.formatters import human_to_bytes + +from ansible_collections.community.docker.plugins.module_utils.common_api import ( + AnsibleDockerClient, + RequestException, +) + +from ansible_collections.community.docker.plugins.module_utils.module_container.base import ( + OPTION_AUTO_REMOVE, + OPTION_BLKIO_WEIGHT, + OPTION_CAPABILITIES, + OPTION_CAP_DROP, + OPTION_CGROUP_PARENT, + OPTION_COMMAND, + OPTION_CPU_PERIOD, + OPTION_CPU_QUOTA, + OPTION_CPUSET_CPUS, + OPTION_CPUSET_MEMS, + OPTION_CPU_SHARES, 
+ OPTION_ENTRYPOINT, + OPTION_CPUS, + OPTION_DETACH_INTERACTIVE, + OPTION_DEVICES, + OPTION_DEVICE_READ_BPS, + OPTION_DEVICE_WRITE_BPS, + OPTION_DEVICE_READ_IOPS, + OPTION_DEVICE_WRITE_IOPS, + OPTION_DEVICE_REQUESTS, + OPTION_DNS_SERVERS, + OPTION_DNS_OPTS, + OPTION_DNS_SEARCH_DOMAINS, + OPTION_DOMAINNAME, + OPTION_ENVIRONMENT, + OPTION_ETC_HOSTS, + OPTION_GROUPS, + OPTION_HEALTHCHECK, + OPTION_HOSTNAME, + OPTION_IMAGE, + OPTION_INIT, + OPTION_IPC_MODE, + OPTION_KERNEL_MEMORY, + OPTION_LABELS, + OPTION_LINKS, + OPTION_LOG_DRIVER_OPTIONS, + OPTION_MAC_ADDRESS, + OPTION_MEMORY, + OPTION_MEMORY_RESERVATION, + OPTION_MEMORY_SWAP, + OPTION_MEMORY_SWAPPINESS, + OPTION_STOP_TIMEOUT, + OPTION_NETWORK, + OPTION_OOM_KILLER, + OPTION_OOM_SCORE_ADJ, + OPTION_PID_MODE, + OPTION_PIDS_LIMIT, + OPTION_PRIVILEGED, + OPTION_READ_ONLY, + OPTION_RESTART_POLICY, + OPTION_RUNTIME, + OPTION_SECURITY_OPTS, + OPTION_SHM_SIZE, + OPTION_STOP_SIGNAL, + OPTION_STORAGE_OPTS, + OPTION_SYSCTLS, + OPTION_TMPFS, + OPTION_TTY, + OPTION_ULIMITS, + OPTION_USER, + OPTION_USERNS_MODE, + OPTION_UTS, + OPTION_VOLUME_DRIVER, + OPTION_VOLUMES_FROM, + OPTION_WORKING_DIR, + OPTION_MOUNTS_VOLUMES, + OPTION_PORTS, + OPTIONS, + Engine, + EngineDriver, +) + +from ansible_collections.community.docker.plugins.module_utils.util import ( + normalize_healthcheck_test, + omit_none_from_dict, +) + +from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion + +from ansible_collections.community.docker.plugins.module_utils._api.errors import ( + APIError, + DockerException, + NotFound, +) + +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( + convert_port_bindings, + normalize_links, + parse_repository_tag, +) + + +_DEFAULT_IP_REPLACEMENT_STRING = '[[DEFAULT_IP:iewahhaeB4Sae6Aen8IeShairoh4zeph7xaekoh8Geingunaesaeweiy3ooleiwi]]' + + +_MOUNT_OPTION_TYPES = dict( + volume_driver='volume', + volume_options='volume', + propagation='bind', + 
no_copy='volume', + labels='volume', + tmpfs_size='tmpfs', + tmpfs_mode='tmpfs', +) + + +def _get_ansible_type(type): + if type == 'set': + return 'list' + if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'): + raise Exception('Invalid type "%s"' % (type, )) + return type + + +_SENTRY = object() + + +class DockerAPIEngineDriver(EngineDriver): + name = 'docker_api' + + def setup(self, argument_spec, mutually_exclusive=None, required_together=None, required_one_of=None, required_if=None, required_by=None): + argument_spec = argument_spec or {} + mutually_exclusive = mutually_exclusive or [] + required_together = required_together or [] + required_one_of = required_one_of or [] + required_if = required_if or [] + required_by = required_by or {} + + active_options = [] + option_minimal_versions = {} + for options in OPTIONS: + if not options.supports_engine(self.name): + continue + + mutually_exclusive.extend(options.ansible_mutually_exclusive) + required_together.extend(options.ansible_required_together) + required_one_of.extend(options.ansible_required_one_of) + required_if.extend(options.ansible_required_if) + required_by.update(options.ansible_required_by) + argument_spec.update(options.argument_spec) + + engine = options.get_engine(self.name) + if engine.min_api_version is not None: + for option in options.options: + if not option.not_an_ansible_option: + option_minimal_versions[option.name] = {'docker_api_version': engine.min_api_version} + + active_options.append(options) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of, + required_if=required_if, + required_by=required_by, + option_minimal_versions=option_minimal_versions, + supports_check_mode=True, + ) + + return client.module, active_options, client + + def get_api_version(self, client): + return client.docker_api_version + + def get_container_id(self, container): + 
return container['Id'] + + def get_image_from_container(self, container): + return container['Image'] + + def is_container_removing(self, container): + if container.get('State'): + return container['State'].get('Status') == 'removing' + return False + + def is_container_running(self, container): + if container.get('State'): + if container['State'].get('Running') and not container['State'].get('Ghost', False): + return True + return False + + def is_container_paused(self, container): + if container.get('State'): + return container['State'].get('Paused', False) + return False + + def inspect_container_by_name(self, client, container_name): + return client.get_container(container_name) + + def inspect_container_by_id(self, client, container_id): + return client.get_container_by_id(container_id) + + def inspect_image_by_id(self, client, image_id): + return client.find_image_by_id(image_id) + + def inspect_image_by_name(self, client, repository, tag): + return client.find_image(repository, tag) + + def pull_image(self, client, repository, tag): + return client.pull_image(repository, tag) + + def pause_container(self, client, container_id): + client.post_call('/containers/{0}/pause', container_id) + + def unpause_container(self, client, container_id): + client.post_call('/containers/{0}/unpause', container_id) + + def disconnect_container_from_network(self, client, container_id, network_id): + client.post_json('/networks/{0}/disconnect', network_id, data={'Container': container_id}) + + def connect_container_to_network(self, client, container_id, network_id, parameters=None): + parameters = (parameters or {}).copy() + params = {} + for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items(): + value = parameters.pop(para, None) + if value: + if para == 'links': + value = normalize_links(value) + params[dest_para] = value + if parameters: + raise Exception( + 'Unknown parameter(s) for 
connect_container_to_network for Docker API driver: %s' % (', '.join(['"%s"' % p for p in sorted(parameters)]))) + ipam_config = {} + for param in ('IPv4Address', 'IPv6Address'): + if param in params: + ipam_config[param] = params.pop(param) + if ipam_config: + params['IPAMConfig'] = ipam_config + data = { + 'Container': container_id, + 'EndpointConfig': params, + } + client.post_json('/networks/{0}/connect', network_id, data=data) + + def create_container(self, client, container_name, create_parameters): + params = {'name': container_name} + new_container = client.post_json_to_json('/containers/create', data=create_parameters, params=params) + client.report_warnings(new_container) + return new_container['Id'] + + def start_container(self, client, container_id): + client.post_json('/containers/{0}/start', container_id) + + def wait_for_container(self, client, container_id): + return client.post_json_to_json('/containers/{0}/wait', container_id)['StatusCode'] + + def get_container_output(self, client, container_id): + config = client.get_json('/containers/{0}/json', container_id) + logging_driver = config['HostConfig']['LogConfig']['Type'] + if logging_driver in ('json-file', 'journald', 'local'): + params = { + 'stderr': 1, + 'stdout': 1, + 'timestamps': 0, + 'follow': 0, + 'tail': 'all', + } + res = client._get(client._url('/containers/{0}/logs', container_id), params=params) + output = client._get_result_tty(False, res, config['Config']['Tty']) + return output, True + else: + return "Result logged using `%s` driver" % logging_driver, False + + def update_container(self, client, container_id, update_parameters): + result = client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters) + client.report_warnings(result) + + def restart_container(self, client, container_id, timeout=None): + client_timeout = client.timeout + if client_timeout is not None: + client_timeout += timeout or 10 + client.post_call('/containers/{0}/restart', 
container_id, params={'t': timeout}, timeout=client_timeout) + + def kill_container(self, client, container_id, kill_signal=None): + params = {} + if kill_signal is not None: + params['signal'] = int(kill_signal) + client.post_call('/containers/{0}/kill', container_id, params=params) + + def stop_container(self, client, container_id, timeout=None): + if timeout: + params = {'t': timeout} + else: + params = {} + timeout = 10 + client_timeout = client.timeout + if client_timeout is not None: + client_timeout += timeout + count = 0 + while True: + try: + client.post_call('/containers/{0}/stop', container_id, params=params, timeout=client_timeout) + except APIError as exc: + if 'Unpause the container before stopping or killing' in exc.explanation: + # New docker daemon versions do not allow containers to be removed + # if they are paused. Make sure we don't end up in an infinite loop. + if count == 3: + raise Exception('%s [tried to unpause three times]' % to_native(exc)) + count += 1 + # Unpause + try: + self.unpause_container(client, container_id) + except Exception as exc2: + raise Exception('%s [while unpausing]' % to_native(exc2)) + # Now try again + continue + raise + # We only loop when explicitly requested by 'continue' + break + + def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False): + params = {'v': remove_volumes, 'link': link, 'force': force} + count = 0 + while True: + try: + client.delete_call('/containers/{0}', container_id, params=params) + except NotFound as dummy: + pass + except APIError as exc: + if 'Unpause the container before stopping or killing' in exc.explanation: + # New docker daemon versions do not allow containers to be removed + # if they are paused. Make sure we don't end up in an infinite loop. 
+ if count == 3: + raise Exception('%s [tried to unpause three times]' % to_native(exc)) + count += 1 + # Unpause + try: + self.unpause_container(client, container_id) + except Exception as exc2: + raise Exception('%s [while unpausing]' % to_native(exc2)) + # Now try again + continue + if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation: + pass + else: + raise + # We only loop when explicitly requested by 'continue' + break + + def run(self, runner, client): + try: + runner() + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + except RequestException as e: + client.fail( + 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + + +class DockerAPIEngine(Engine): + def __init__( + self, + get_value, + preprocess_value=None, + get_expected_values=None, + ignore_mismatching_result=None, + set_value=None, + update_value=None, + can_set_value=None, + can_update_value=None, + min_api_version=None, + ): + self.min_api_version = min_api_version + self.min_api_version_obj = None if min_api_version is None else LooseVersion(min_api_version) + self.get_value = get_value + self.set_value = set_value + self.get_expected_values = get_expected_values or (lambda module, client, api_version, options, image, values: values) + self.ignore_mismatching_result = ignore_mismatching_result or \ + (lambda module, client, api_version, option, image, container_value, expected_value: False) + self.preprocess_value = preprocess_value or (lambda module, client, api_version, options, values: values) + self.update_value = update_value + self.can_set_value = can_set_value or (lambda api_version: set_value is not None) + self.can_update_value = can_update_value or (lambda api_version: update_value is not None) + + @classmethod + def config_value( + cls, + config_name, + 
postprocess_for_get=None, + preprocess_for_set=None, + get_expected_value=None, + ignore_mismatching_result=None, + min_api_version=None, + preprocess_value=None, + update_parameter=None, + ): + def preprocess_value_(module, client, api_version, options, values): + if len(options) != 1: + raise AssertionError('config_value can only be used for a single option') + if preprocess_value is not None and options[0].name in values: + value = preprocess_value(module, client, api_version, values[options[0].name]) + if value is None: + del values[options[0].name] + else: + values[options[0].name] = value + return values + + def get_value(module, container, api_version, options): + if len(options) != 1: + raise AssertionError('config_value can only be used for a single option') + value = container['Config'].get(config_name, _SENTRY) + if postprocess_for_get: + value = postprocess_for_get(module, api_version, value, _SENTRY) + if value is _SENTRY: + return {} + return {options[0].name: value} + + get_expected_values_ = None + if get_expected_value: + def get_expected_values_(module, client, api_version, options, image, values): + if len(options) != 1: + raise AssertionError('host_config_value can only be used for a single option') + value = values.get(options[0].name, _SENTRY) + value = get_expected_value(module, client, api_version, image, value, _SENTRY) + if value is _SENTRY: + return values + return {options[0].name: value} + + def set_value(module, data, api_version, options, values): + if len(options) != 1: + raise AssertionError('config_value can only be used for a single option') + if options[0].name not in values: + return + value = values[options[0].name] + if preprocess_for_set: + value = preprocess_for_set(module, api_version, value) + data[config_name] = value + + update_value = None + if update_parameter: + def update_value(module, data, api_version, options, values): + if len(options) != 1: + raise AssertionError('update_parameter can only be used for a single 
option') + if options[0].name not in values: + return + value = values[options[0].name] + if preprocess_for_set: + value = preprocess_for_set(module, api_version, value) + data[update_parameter] = value + + return cls( + get_value=get_value, + preprocess_value=preprocess_value_, + get_expected_values=get_expected_values_, + ignore_mismatching_result=ignore_mismatching_result, + set_value=set_value, + min_api_version=min_api_version, + update_value=update_value, + ) + + @classmethod + def host_config_value( + cls, + host_config_name, + postprocess_for_get=None, + preprocess_for_set=None, + get_expected_value=None, + ignore_mismatching_result=None, + min_api_version=None, + preprocess_value=None, + update_parameter=None, + ): + def preprocess_value_(module, client, api_version, options, values): + if len(options) != 1: + raise AssertionError('host_config_value can only be used for a single option') + if preprocess_value is not None and options[0].name in values: + value = preprocess_value(module, client, api_version, values[options[0].name]) + if value is None: + del values[options[0].name] + else: + values[options[0].name] = value + return values + + def get_value(module, container, api_version, options): + if len(options) != 1: + raise AssertionError('host_config_value can only be used for a single option') + value = container['HostConfig'].get(host_config_name, _SENTRY) + if postprocess_for_get: + value = postprocess_for_get(module, api_version, value, _SENTRY) + if value is _SENTRY: + return {} + return {options[0].name: value} + + get_expected_values_ = None + if get_expected_value: + def get_expected_values_(module, client, api_version, options, image, values): + if len(options) != 1: + raise AssertionError('host_config_value can only be used for a single option') + value = values.get(options[0].name, _SENTRY) + value = get_expected_value(module, client, api_version, image, value, _SENTRY) + if value is _SENTRY: + return values + return {options[0].name: value} 
+ + def set_value(module, data, api_version, options, values): + if len(options) != 1: + raise AssertionError('host_config_value can only be used for a single option') + if options[0].name not in values: + return + if 'HostConfig' not in data: + data['HostConfig'] = {} + value = values[options[0].name] + if preprocess_for_set: + value = preprocess_for_set(module, api_version, value) + data['HostConfig'][host_config_name] = value + + update_value = None + if update_parameter: + def update_value(module, data, api_version, options, values): + if len(options) != 1: + raise AssertionError('update_parameter can only be used for a single option') + if options[0].name not in values: + return + value = values[options[0].name] + if preprocess_for_set: + value = preprocess_for_set(module, api_version, value) + data[update_parameter] = value + + return cls( + get_value=get_value, + preprocess_value=preprocess_value_, + get_expected_values=get_expected_values_, + ignore_mismatching_result=ignore_mismatching_result, + set_value=set_value, + min_api_version=min_api_version, + update_value=update_value, + ) + + +def _is_volume_permissions(mode): + for part in mode.split(','): + if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'): + return False + return True + + +def _normalize_port(port): + if '/' not in port: + return port + '/tcp' + return port + + +def _get_default_host_ip(module, client): + if module.params['default_host_ip'] is not None: + return module.params['default_host_ip'] + ip = '0.0.0.0' + for network_data in module.params['networks'] or []: + if network_data.get('name'): + network = client.get_network(network_data['name']) + if network is None: + client.fail( + "Cannot inspect the network '{0}' to determine the default IP".format(network_data['name']), + ) + if network.get('Driver') == 'bridge' and network.get('Options', 
{}).get('com.docker.network.bridge.host_binding_ipv4'): + ip = network['Options']['com.docker.network.bridge.host_binding_ipv4'] + break + return ip + + +def _get_value_detach_interactive(module, container, api_version, options): + attach_stdin = container['Config'].get('OpenStdin') + attach_stderr = container['Config'].get('AttachStderr') + attach_stdout = container['Config'].get('AttachStdout') + return { + 'interactive': bool(attach_stdin), + 'detach': not (attach_stderr and attach_stdout), + } + + +def _set_value_detach_interactive(module, data, api_version, options, values): + interactive = values.get('interactive') + detach = values.get('detach') + + data['AttachStdout'] = False + data['AttachStderr'] = False + data['AttachStdin'] = False + data['StdinOnce'] = False + data['OpenStdin'] = interactive + if not detach: + data['AttachStdout'] = True + data['AttachStderr'] = True + if interactive: + data['AttachStdin'] = True + data['StdinOnce'] = True + + +def _get_expected_env_value(module, client, api_version, image, value, sentry): + expected_env = {} + if image and image['Config'].get('Env'): + for env_var in image['Config']['Env']: + parts = env_var.split('=', 1) + expected_env[parts[0]] = parts[1] + if value and value is not sentry: + for env_var in value: + parts = env_var.split('=', 1) + expected_env[parts[0]] = parts[1] + param_env = [] + for key, env_value in expected_env.items(): + param_env.append("%s=%s" % (key, env_value)) + return param_env + + +def _preprocess_cpus(module, client, api_version, value): + if value is not None: + value = int(round(value * 1E9)) + return value + + +def _preprocess_devices(module, client, api_version, value): + if not value: + return value + expected_devices = [] + for device in value: + parts = device.split(':') + if len(parts) == 1: + expected_devices.append( + dict( + CgroupPermissions='rwm', + PathInContainer=parts[0], + PathOnHost=parts[0] + )) + elif len(parts) == 2: + parts = device.split(':') + 
expected_devices.append( + dict( + CgroupPermissions='rwm', + PathInContainer=parts[1], + PathOnHost=parts[0] + ) + ) + else: + expected_devices.append( + dict( + CgroupPermissions=parts[2], + PathInContainer=parts[1], + PathOnHost=parts[0] + )) + return expected_devices + + +def _preprocess_rate_bps(module, client, api_version, value): + if not value: + return value + devices = [] + for device in value: + devices.append({ + 'Path': device['path'], + 'Rate': human_to_bytes(device['rate']), + }) + return devices + + +def _preprocess_rate_iops(module, client, api_version, value): + if not value: + return value + devices = [] + for device in value: + devices.append({ + 'Path': device['path'], + 'Rate': device['rate'], + }) + return devices + + +def _preprocess_device_requests(module, client, api_version, value): + if not value: + return value + device_requests = [] + for dr in value: + device_requests.append({ + 'Driver': dr['driver'], + 'Count': dr['count'], + 'DeviceIDs': dr['device_ids'], + 'Capabilities': dr['capabilities'], + 'Options': dr['options'], + }) + return device_requests + + +def _preprocess_etc_hosts(module, client, api_version, value): + if value is None: + return value + results = [] + for key, value in value.items(): + results.append('%s%s%s' % (key, ':', value)) + return results + + +def _preprocess_healthcheck(module, client, api_version, value): + if value is None: + return value + if not value or not value.get('test'): + value = {'test': ['NONE']} + elif 'test' in value: + value['test'] = normalize_healthcheck_test(value['test']) + return omit_none_from_dict({ + 'Test': value.get('test'), + 'Interval': value.get('interval'), + 'Timeout': value.get('timeout'), + 'StartPeriod': value.get('start_period'), + 'Retries': value.get('retries'), + }) + + +def _postprocess_healthcheck_get_value(module, api_version, value, sentry): + if value is None or value is sentry or value.get('Test') == ['NONE']: + return {'Test': ['NONE']} + return value + + +def 
_preprocess_convert_to_bytes(module, values, name, unlimited_value=None): + if name not in values: + return values + try: + value = values[name] + if unlimited_value is not None and value in ('unlimited', str(unlimited_value)): + value = unlimited_value + else: + value = human_to_bytes(value) + values[name] = value + return values + except ValueError as exc: + module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) + + +def _get_image_labels(image): + if not image: + return {} + + # Can't use get('Labels', {}) because 'Labels' may be present and be None + return image['Config'].get('Labels') or {} + + +def _get_expected_labels_value(module, client, api_version, image, value, sentry): + if value is sentry: + return sentry + expected_labels = {} + if module.params['image_label_mismatch'] == 'ignore': + expected_labels.update(dict(_get_image_labels(image))) + expected_labels.update(value) + return expected_labels + + +def _preprocess_links(module, client, api_version, value): + if value is None: + return None + + result = [] + for link in value: + parsed_link = link.split(':', 1) + if len(parsed_link) == 2: + link, alias = parsed_link + else: + link, alias = parsed_link[0], parsed_link[0] + result.append('/%s:/%s/%s' % (link, module.params['name'], alias)) + + return result + + +def _ignore_mismatching_label_result(module, client, api_version, option, image, container_value, expected_value): + if option.comparison == 'strict' and module.params['image_label_mismatch'] == 'fail': + # If there are labels from the base image that should be removed and + # base_image_mismatch is fail we want raise an error. 
+ image_labels = _get_image_labels(image) + would_remove_labels = [] + labels_param = module.params['labels'] or {} + for label in image_labels: + if label not in labels_param: + # Format label for error message + would_remove_labels.append('"%s"' % (label, )) + if would_remove_labels: + msg = ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore" + " this error. Labels: {0}") + client.fail(msg.format(', '.join(would_remove_labels))) + return False + + +def _ignore_mismatching_network_result(module, client, api_version, option, image, container_value, expected_value): + # 'networks' is handled out-of-band + if option.name == 'networks': + return True + return False + + +def _preprocess_network_values(module, client, api_version, options, values): + if 'networks' in values: + for network in values['networks']: + network['id'] = _get_network_id(module, client, network['name']) + if not network['id']: + client.fail("Parameter error: network named %s could not be found. Does it exist?" 
% (network['name'], )) + + if 'network_mode' in values: + values['network_mode'] = _preprocess_container_names(module, client, api_version, values['network_mode']) + + return values + + +def _get_network_id(module, client, network_name): + try: + network_id = None + params = {'filters': json.dumps({'name': [network_name]})} + for network in client.get_json('/networks', params=params): + if network['Name'] == network_name: + network_id = network['Id'] + break + return network_id + except Exception as exc: + client.fail("Error getting network id for %s - %s" % (network_name, to_native(exc))) + + +def _get_values_network(module, container, api_version, options): + value = container['HostConfig'].get('NetworkMode', _SENTRY) + if value is _SENTRY: + return {} + return {'network_mode': value} + + +def _set_values_network(module, data, api_version, options, values): + if 'network_mode' not in values: + return + if 'HostConfig' not in data: + data['HostConfig'] = {} + value = values['network_mode'] + data['HostConfig']['NetworkMode'] = value + + +def _get_values_mounts(module, container, api_version, options): + volumes = container['Config'].get('Volumes') + binds = container['HostConfig'].get('Binds') + # According to https://github.com/moby/moby/, support for HostConfig.Mounts + # has been included at least since v17.03.0-ce, which has API version 1.26. + # The previous tag, v1.9.1, has API version 1.21 and does not have + # HostConfig.Mounts. I have no idea what about API 1.25... 
+ mounts = container['HostConfig'].get('Mounts') + if mounts is not None: + result = [] + empty_dict = {} + for mount in mounts: + result.append({ + 'type': mount.get('Type'), + 'source': mount.get('Source'), + 'target': mount.get('Target'), + 'read_only': mount.get('ReadOnly', False), # golang's omitempty for bool returns None for False + 'consistency': mount.get('Consistency'), + 'propagation': mount.get('BindOptions', empty_dict).get('Propagation'), + 'no_copy': mount.get('VolumeOptions', empty_dict).get('NoCopy', False), + 'labels': mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict), + 'volume_driver': mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name'), + 'volume_options': mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict), + 'tmpfs_size': mount.get('TmpfsOptions', empty_dict).get('SizeBytes'), + 'tmpfs_mode': mount.get('TmpfsOptions', empty_dict).get('Mode'), + }) + mounts = result + result = {} + if volumes is not None: + result['volumes'] = volumes + if binds is not None: + result['volume_binds'] = binds + if mounts is not None: + result['mounts'] = mounts + return result + + +def _get_bind_from_dict(volume_dict): + results = [] + if volume_dict: + for host_path, config in volume_dict.items(): + if isinstance(config, dict) and config.get('bind'): + container_path = config.get('bind') + mode = config.get('mode', 'rw') + results.append("%s:%s:%s" % (host_path, container_path, mode)) + return results + + +def _get_image_binds(volumes): + ''' + Convert array of binds to array of strings with format host_path:container_path:mode + + :param volumes: array of bind dicts + :return: array of strings + ''' + results = [] + if isinstance(volumes, dict): + results += _get_bind_from_dict(volumes) + elif isinstance(volumes, list): + for vol in volumes: + results += _get_bind_from_dict(vol) + return results + + +def _get_expected_values_mounts(module, client, api_version, options, 
image, values): + expected_values = {} + + # binds + if 'mounts' in values: + expected_values['mounts'] = values['mounts'] + + # volumes + expected_vols = dict() + if image and image['Config'].get('Volumes'): + expected_vols.update(image['Config'].get('Volumes')) + if 'volumes' in values: + for vol in values['volumes']: + # We only expect anonymous volumes to show up in the list + if ':' in vol: + parts = vol.split(':') + if len(parts) == 3: + continue + if len(parts) == 2: + if not _is_volume_permissions(parts[1]): + continue + expected_vols[vol] = {} + if expected_vols: + expected_values['volumes'] = expected_vols + + # binds + image_vols = [] + if image: + image_vols = _get_image_binds(image['Config'].get('Volumes')) + param_vols = [] + if 'volume_binds' in values: + param_vols = values['volume_binds'] + expected_values['volume_binds'] = list(set(image_vols + param_vols)) + + return expected_values + + +def _set_values_mounts(module, data, api_version, options, values): + if 'mounts' in values: + if 'HostConfig' not in data: + data['HostConfig'] = {} + mounts = [] + for mount in values['mounts']: + mount_type = mount.get('type') + mount_res = { + 'Target': mount.get('target'), + 'Source': mount.get('source'), + 'Type': mount_type, + 'ReadOnly': mount.get('read_only'), + } + if 'consistency' in mount: + mount_res['Consistency'] = mount['consistency'] + if mount_type == 'bind': + if 'propagation' in mount: + mount_res['BindOptions'] = { + 'Propagation': mount['propagation'], + } + if mount_type == 'volume': + volume_opts = {} + if mount.get('no_copy'): + volume_opts['NoCopy'] = True + if mount.get('labels'): + volume_opts['Labels'] = mount.get('labels') + if mount.get('volume_driver'): + driver_config = { + 'Name': mount.get('volume_driver'), + } + if mount.get('volume_options'): + driver_config['Options'] = mount.get('volume_options') + volume_opts['DriverConfig'] = driver_config + if volume_opts: + mount_res['VolumeOptions'] = volume_opts + if mount_type == 
'tmpfs': + tmpfs_opts = {} + if mount.get('tmpfs_mode'): + tmpfs_opts['Mode'] = mount.get('tmpfs_mode') + if mount.get('tmpfs_size'): + tmpfs_opts['SizeBytes'] = mount.get('tmpfs_size') + if mount.get('tmpfs_opts'): + mount_res['TmpfsOptions'] = mount.get('tmpfs_opts') + mounts.append(mount_res) + data['HostConfig']['Mounts'] = mounts + if 'volumes' in values: + volumes = {} + for volume in values['volumes']: + # Only pass anonymous volumes to create container + if ':' in volume: + parts = volume.split(':') + if len(parts) == 3: + continue + if len(parts) == 2: + if not _is_volume_permissions(parts[1]): + continue + volumes[volume] = {} + data['Volumes'] = volumes + if 'volume_binds' in values: + if 'HostConfig' not in data: + data['HostConfig'] = {} + data['HostConfig']['Binds'] = values['volume_binds'] + + +def _get_values_log(module, container, api_version, options): + log_config = container['HostConfig'].get('LogConfig') or {} + return { + 'log_driver': log_config.get('Type'), + 'log_options': log_config.get('Config'), + } + + +def _set_values_log(module, data, api_version, options, values): + if 'log_driver' not in values: + return + log_config = { + 'Type': values['log_driver'], + 'Config': values.get('log_options') or {}, + } + if 'HostConfig' not in data: + data['HostConfig'] = {} + data['HostConfig']['LogConfig'] = log_config + + +def _get_values_restart(module, container, api_version, options): + restart_policy = container['HostConfig'].get('RestartPolicy') or {} + return { + 'restart_policy': restart_policy.get('Name'), + 'restart_retries': restart_policy.get('MaximumRetryCount'), + } + + +def _set_values_restart(module, data, api_version, options, values): + if 'restart_policy' not in values: + return + restart_policy = { + 'Name': values['restart_policy'], + 'MaximumRetryCount': values.get('restart_retries'), + } + if 'HostConfig' not in data: + data['HostConfig'] = {} + data['HostConfig']['RestartPolicy'] = restart_policy + + +def 
_update_value_restart(module, data, api_version, options, values): + if 'restart_policy' not in values: + return + data['RestartPolicy'] = { + 'Name': values['restart_policy'], + 'MaximumRetryCount': values.get('restart_retries'), + } + + +def _get_values_ports(module, container, api_version, options): + host_config = container['HostConfig'] + config = container['Config'] + + # "ExposedPorts": null returns None type & causes AttributeError - PR #5517 + if config.get('ExposedPorts') is not None: + expected_exposed = [_normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()] + else: + expected_exposed = [] + + return { + 'published_ports': host_config.get('PortBindings'), + 'exposed_ports': expected_exposed, + 'publish_all_ports': host_config.get('PublishAllPorts'), + } + + +def _get_expected_values_ports(module, client, api_version, options, image, values): + expected_values = {} + + if 'published_ports' in values: + expected_bound_ports = {} + for container_port, config in values['published_ports'].items(): + if isinstance(container_port, int): + container_port = "%s/tcp" % container_port + if len(config) == 1: + if isinstance(config[0], int): + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}] + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}] + elif isinstance(config[0], tuple): + expected_bound_ports[container_port] = [] + for host_ip, host_port in config: + expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')}) + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}] + expected_values['published_ports'] = expected_bound_ports + + image_ports = [] + if image: + image_exposed_ports = image['Config'].get('ExposedPorts') or {} + image_ports = [_normalize_port(p) for p in image_exposed_ports] + param_ports = [] + if 'ports' in 
values: + param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in values['ports']] + result = list(set(image_ports + param_ports)) + expected_values['exposed_ports'] = result + + if 'publish_all_ports' in values: + expected_values['publish_all_ports'] = values['publish_all_ports'] + + return expected_values + + +def _set_values_ports(module, data, api_version, options, values): + if 'ports' in values: + exposed_ports = {} + for port_definition in values['ports']: + port = port_definition + proto = 'tcp' + if isinstance(port_definition, tuple): + if len(port_definition) == 2: + proto = port_definition[1] + port = port_definition[0] + exposed_ports['%s/%s' % (port, proto)] = {} + data['ExposedPorts'] = exposed_ports + if 'published_ports' in values: + if 'HostConfig' not in data: + data['HostConfig'] = {} + data['HostConfig']['PortBindings'] = convert_port_bindings(values['published_ports']) + if 'publish_all_ports' in values and values['publish_all_ports']: + if 'HostConfig' not in data: + data['HostConfig'] = {} + data['HostConfig']['PublishAllPorts'] = values['publish_all_ports'] + + +def _preprocess_value_ports(module, client, api_version, options, values): + if 'published_ports' not in values: + return values + found = False + for port_spec in values['published_ports'].values(): + if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING: + found = True + break + if not found: + return values + default_ip = _get_default_host_ip(module, client) + for port, port_spec in values['published_ports'].items(): + if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING: + values['published_ports'][port] = tuple([default_ip] + list(port_spec[1:])) + return values + + +def _preprocess_container_names(module, client, api_version, value): + if value is None or not value.startswith('container:'): + return value + container_name = value[len('container:'):] + # Try to inspect container to see whether this is an ID or a + # name (and in the latter case, retrieve its 
# Registration of the docker_api engine for each container option. Each entry
# binds an option (declared in module_container/base.py) to the config or
# host-config key it maps to in the Docker API payload, plus any pre/post
# processing hooks defined above.
OPTION_AUTO_REMOVE.add_engine('docker_api', DockerAPIEngine.host_config_value('AutoRemove'))
OPTION_BLKIO_WEIGHT.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioWeight', update_parameter='BlkioWeight'))
OPTION_CAPABILITIES.add_engine('docker_api', DockerAPIEngine.host_config_value('CapAdd'))
OPTION_CAP_DROP.add_engine('docker_api', DockerAPIEngine.host_config_value('CapDrop'))
OPTION_CGROUP_PARENT.add_engine('docker_api', DockerAPIEngine.host_config_value('CgroupParent'))
OPTION_COMMAND.add_engine('docker_api', DockerAPIEngine.config_value('Cmd'))
OPTION_CPU_PERIOD.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuPeriod', update_parameter='CpuPeriod'))
OPTION_CPU_QUOTA.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuQuota', update_parameter='CpuQuota'))
OPTION_CPUSET_CPUS.add_engine('docker_api', DockerAPIEngine.host_config_value('CpusetCpus', update_parameter='CpusetCpus'))
OPTION_CPUSET_MEMS.add_engine('docker_api', DockerAPIEngine.host_config_value('CpusetMems', update_parameter='CpusetMems'))
OPTION_CPU_SHARES.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuShares', update_parameter='CpuShares'))
OPTION_ENTRYPOINT.add_engine('docker_api', DockerAPIEngine.config_value('Entrypoint'))
OPTION_CPUS.add_engine('docker_api', DockerAPIEngine.host_config_value('NanoCpus', preprocess_value=_preprocess_cpus))
OPTION_DETACH_INTERACTIVE.add_engine('docker_api', DockerAPIEngine(get_value=_get_value_detach_interactive, set_value=_set_value_detach_interactive))
OPTION_DEVICES.add_engine('docker_api', DockerAPIEngine.host_config_value('Devices', preprocess_value=_preprocess_devices))
OPTION_DEVICE_READ_BPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceReadBps', preprocess_value=_preprocess_rate_bps))
OPTION_DEVICE_WRITE_BPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceWriteBps', preprocess_value=_preprocess_rate_bps))
OPTION_DEVICE_READ_IOPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceReadIOps', preprocess_value=_preprocess_rate_iops))
OPTION_DEVICE_WRITE_IOPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceWriteIOps', preprocess_value=_preprocess_rate_iops))
OPTION_DEVICE_REQUESTS.add_engine('docker_api', DockerAPIEngine.host_config_value(
    'DeviceRequests', min_api_version='1.40', preprocess_value=_preprocess_device_requests))
OPTION_DNS_SERVERS.add_engine('docker_api', DockerAPIEngine.host_config_value('Dns'))
OPTION_DNS_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('DnsOptions'))
OPTION_DNS_SEARCH_DOMAINS.add_engine('docker_api', DockerAPIEngine.host_config_value('DnsSearch'))
OPTION_DOMAINNAME.add_engine('docker_api', DockerAPIEngine.config_value('Domainname'))
OPTION_ENVIRONMENT.add_engine('docker_api', DockerAPIEngine.config_value('Env', get_expected_value=_get_expected_env_value))
OPTION_ETC_HOSTS.add_engine('docker_api', DockerAPIEngine.host_config_value('ExtraHosts', preprocess_value=_preprocess_etc_hosts))
OPTION_GROUPS.add_engine('docker_api', DockerAPIEngine.host_config_value('GroupAdd'))
OPTION_HEALTHCHECK.add_engine('docker_api', DockerAPIEngine.config_value(
    'Healthcheck', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value))
OPTION_HOSTNAME.add_engine('docker_api', DockerAPIEngine.config_value('Hostname'))
OPTION_IMAGE.add_engine('docker_api', DockerAPIEngine.config_value(
    'Image', ignore_mismatching_result=lambda module, client, api_version, option, image, container_value, expected_value: True))
OPTION_INIT.add_engine('docker_api', DockerAPIEngine.host_config_value('Init'))
OPTION_IPC_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('IpcMode', preprocess_value=_preprocess_container_names))
OPTION_KERNEL_MEMORY.add_engine('docker_api', DockerAPIEngine.host_config_value('KernelMemory', update_parameter='KernelMemory'))
OPTION_LABELS.add_engine('docker_api', DockerAPIEngine.config_value(
    'Labels', get_expected_value=_get_expected_labels_value, ignore_mismatching_result=_ignore_mismatching_label_result))
OPTION_LINKS.add_engine('docker_api', DockerAPIEngine.host_config_value('Links', preprocess_value=_preprocess_links))
OPTION_LOG_DRIVER_OPTIONS.add_engine('docker_api', DockerAPIEngine(
    get_value=_get_values_log,
    set_value=_set_values_log,
))
OPTION_MAC_ADDRESS.add_engine('docker_api', DockerAPIEngine.config_value('MacAddress'))
OPTION_MEMORY.add_engine('docker_api', DockerAPIEngine.host_config_value('Memory', update_parameter='Memory'))
OPTION_MEMORY_RESERVATION.add_engine('docker_api', DockerAPIEngine.host_config_value('MemoryReservation', update_parameter='MemoryReservation'))
OPTION_MEMORY_SWAP.add_engine('docker_api', DockerAPIEngine.host_config_value('MemorySwap', update_parameter='MemorySwap'))
OPTION_MEMORY_SWAPPINESS.add_engine('docker_api', DockerAPIEngine.host_config_value('MemorySwappiness'))
OPTION_STOP_TIMEOUT.add_engine('docker_api', DockerAPIEngine.config_value('StopTimeout'))
OPTION_NETWORK.add_engine('docker_api', DockerAPIEngine(
    preprocess_value=_preprocess_network_values,
    get_value=_get_values_network,
    set_value=_set_values_network,
    ignore_mismatching_result=_ignore_mismatching_network_result,
))
OPTION_OOM_KILLER.add_engine('docker_api', DockerAPIEngine.host_config_value('OomKillDisable'))
OPTION_OOM_SCORE_ADJ.add_engine('docker_api', DockerAPIEngine.host_config_value('OomScoreAdj'))
OPTION_PID_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('PidMode', preprocess_value=_preprocess_container_names))
OPTION_PIDS_LIMIT.add_engine('docker_api', DockerAPIEngine.host_config_value('PidsLimit'))
OPTION_PRIVILEGED.add_engine('docker_api', DockerAPIEngine.host_config_value('Privileged'))
OPTION_READ_ONLY.add_engine('docker_api', DockerAPIEngine.host_config_value('ReadonlyRootfs'))
OPTION_RESTART_POLICY.add_engine('docker_api', DockerAPIEngine(
    get_value=_get_values_restart,
    set_value=_set_values_restart,
    update_value=_update_value_restart,
))
OPTION_RUNTIME.add_engine('docker_api', DockerAPIEngine.host_config_value('Runtime'))
OPTION_SECURITY_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('SecurityOpt'))
OPTION_SHM_SIZE.add_engine('docker_api', DockerAPIEngine.host_config_value('ShmSize'))
OPTION_STOP_SIGNAL.add_engine('docker_api', DockerAPIEngine.config_value('StopSignal'))
OPTION_STORAGE_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('StorageOpt'))
OPTION_SYSCTLS.add_engine('docker_api', DockerAPIEngine.host_config_value('Sysctls'))
OPTION_TMPFS.add_engine('docker_api', DockerAPIEngine.host_config_value('Tmpfs'))
OPTION_TTY.add_engine('docker_api', DockerAPIEngine.config_value('Tty'))
OPTION_ULIMITS.add_engine('docker_api', DockerAPIEngine.host_config_value('Ulimits'))
OPTION_USER.add_engine('docker_api', DockerAPIEngine.config_value('User'))
OPTION_USERNS_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('UsernsMode'))
OPTION_UTS.add_engine('docker_api', DockerAPIEngine.host_config_value('UTSMode'))
OPTION_VOLUME_DRIVER.add_engine('docker_api', DockerAPIEngine.host_config_value('VolumeDriver'))
OPTION_VOLUMES_FROM.add_engine('docker_api', DockerAPIEngine.host_config_value('VolumesFrom'))
OPTION_WORKING_DIR.add_engine('docker_api', DockerAPIEngine.config_value('WorkingDir'))
class Container(DockerBaseClass):
    """Wrapper around an inspected container (or None when it does not exist),
    delegating state checks to the engine driver."""

    def __init__(self, container, engine_driver):
        super(Container, self).__init__()
        self.raw = container
        self.id = None
        self.image = None
        self.container = container
        self.engine_driver = engine_driver
        if container:
            self.id = engine_driver.get_container_id(container)
            self.image = engine_driver.get_image_from_container(container)
        self.log(self.container, pretty_print=True)

    @property
    def exists(self):
        # The wrapped inspect result is None when the container was not found
        return bool(self.container)

    @property
    def removing(self):
        if not self.container:
            return False
        return self.engine_driver.is_container_removing(self.container)

    @property
    def running(self):
        if not self.container:
            return False
        return self.engine_driver.is_container_running(self.container)

    @property
    def paused(self):
        if not self.container:
            return False
        return self.engine_driver.is_container_paused(self.container)
re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip): + valid_ip = True + if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip): + self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip) + valid_ip = True + if not valid_ip: + self.fail('The value of default_host_ip must be an empty string, an IPv4 address, ' + 'or an IPv6 address. Got "{0}" instead.'.format(self.param_default_host_ip)) + + def _collect_all_options(self, active_options): + all_options = {} + for options in active_options: + for option in options.options: + all_options[option.name] = option + return all_options + + def _collect_all_module_params(self): + all_module_options = set() + for option, data in self.module.argument_spec.items(): + all_module_options.add(option) + if 'aliases' in data: + for alias in data['aliases']: + all_module_options.add(alias) + return all_module_options + + def _parse_comparisons(self): + # Keep track of all module params and all option aliases + all_module_options = self._collect_all_module_params() + comp_aliases = {} + for option_name, option in self.all_options.items(): + if option.not_an_ansible_option: + continue + comp_aliases[option_name] = option_name + for alias in option.ansible_aliases: + comp_aliases[alias] = option_name + # Process legacy ignore options + if self.module.params['ignore_image']: + self.all_options['image'].comparison = 'ignore' + if self.param_purge_networks: + self.all_options['networks'].comparison = 'strict' + # Process comparsions specified by user + if self.module.params.get('comparisons'): + # If '*' appears in comparisons, process it first + if '*' in self.module.params['comparisons']: + value = self.module.params['comparisons']['*'] + if value not in ('strict', 'ignore'): + self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") + for option in self.all_options.values(): + if option.name == 'networks': + # `networks` is special: only update if + # some value is actually 
specified + if self.module.params['networks'] is None: + continue + option.comparison = value + # Now process all other comparisons. + comp_aliases_used = {} + for key, value in self.module.params['comparisons'].items(): + if key == '*': + continue + # Find main key + key_main = comp_aliases.get(key) + if key_main is None: + if key_main in all_module_options: + self.fail("The module option '%s' cannot be specified in the comparisons dict, " + "since it does not correspond to container's state!" % key) + if key not in self.all_options or self.all_options[key].not_an_ansible_option: + self.fail("Unknown module option '%s' in comparisons dict!" % key) + key_main = key + if key_main in comp_aliases_used: + self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main)) + comp_aliases_used[key_main] = key + # Check value and update accordingly + if value in ('strict', 'ignore'): + self.all_options[key_main].comparison = value + elif value == 'allow_more_present': + if self.all_options[key_main].comparison_type == 'value': + self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) + self.all_options[key_main].comparison = value + else: + self.fail("Unknown comparison mode '%s'!" 
% value) + # Copy values + for option in self.all_options.values(): + if option.copy_comparison_from is not None: + option.comparison = self.all_options[option.copy_comparison_from].comparison + # Check legacy values + if self.module.params['ignore_image'] and self.all_options['image'].comparison != 'ignore': + self.module.warn('The ignore_image option has been overridden by the comparisons option!') + if self.param_purge_networks and self.all_options['networks'].comparison != 'strict': + self.module.warn('The purge_networks option has been overridden by the comparisons option!') + + def _update_params(self): + if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None: + # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode + # (assuming no explicit value is specified for network_mode) + self.module.params['network_mode'] = self.module.params['networks'][0]['name'] + if self.param_container_default_behavior == 'compatibility': + old_default_values = dict( + auto_remove=False, + detach=True, + init=False, + interactive=False, + memory='0', + paused=False, + privileged=False, + read_only=False, + tty=False, + ) + for param, value in old_default_values.items(): + if self.module.params[param] is None: + self.module.params[param] = value + + def fail(self, *args, **kwargs): + self.client.fail(*args, **kwargs) + + def run(self): + if self.param_state in ('stopped', 'started', 'present'): + self.present(self.param_state) + elif self.param_state == 'absent': + self.absent() + + if not self.check_mode and not self.param_debug: + self.results.pop('actions') + + if self.module._diff or self.param_debug: + self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after() + self.results['diff'] = self.diff + + if self.facts: + self.results['container'] = self.facts + + def wait_for_state(self, container_id, complete_states=None, 
wait_states=None, accept_removal=False, max_wait=None): + delay = 1.0 + total_wait = 0 + while True: + # Inspect container + result = self.engine_driver.inspect_container_by_id(self.client, container_id) + if result is None: + if accept_removal: + return + msg = 'Encontered vanished container while waiting for container "{0}"' + self.fail(msg.format(container_id)) + # Check container state + state = result.get('State', {}).get('Status') + if complete_states is not None and state in complete_states: + return + if wait_states is not None and state not in wait_states: + msg = 'Encontered unexpected state "{1}" while waiting for container "{0}"' + self.fail(msg.format(container_id, state)) + # Wait + if max_wait is not None: + if total_wait > max_wait: + msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"' + self.fail(msg.format(container_id, max_wait)) + if total_wait + delay > max_wait: + delay = max_wait - total_wait + sleep(delay) + total_wait += delay + # Exponential backoff, but never wait longer than 10 seconds + # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations + # until the maximal 10 seconds delay is reached. By then, the + # code will have slept for ~1.5 minutes.) 
+ delay = min(delay * 1.1, 10) + + def _collect_params(self, active_options): + parameters = [] + for options in active_options: + values = {} + engine = options.get_engine(self.engine_driver.name) + for option in options.options: + if not option.not_an_ansible_option and self.module.params[option.name] is not None: + values[option.name] = self.module.params[option.name] + values = options.preprocess(self.module, values) + engine.preprocess_value(self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, values) + parameters.append((options, values)) + return parameters + + def present(self, state): + self.parameters = self._collect_params(self.options) + container = self._get_container(self.param_name) + was_running = container.running + was_paused = container.paused + container_created = False + + # If the image parameter was passed then we need to deal with the image + # version comparison. Otherwise we handle this depending on whether + # the container already runs or not; in the former case, in case the + # container needs to be restarted, we use the existing container's + # image ID. 
+ image = self._get_image() + self.log(image, pretty_print=True) + if not container.exists or container.removing: + # New container + if container.removing: + self.log('Found container in removal phase') + else: + self.log('No container found') + if not self.param_image: + self.fail('Cannot create container when image is not specified!') + self.diff_tracker.add('exists', parameter=True, active=False) + if container.removing and not self.check_mode: + # Wait for container to be removed before trying to create it + self.wait_for_state( + container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) + new_container = self.container_create(self.param_image) + if new_container: + container = new_container + container_created = True + else: + # Existing container + different, differences = self.has_different_configuration(container, image) + image_different = False + if self.all_options['image'].comparison == 'strict': + image_different = self._image_is_different(image, container) + if image_different or different or self.param_recreate: + self.diff_tracker.merge(differences) + self.diff['differences'] = differences.get_legacy_docker_container_diffs() + if image_different: + self.diff['image_different'] = True + self.log("differences") + self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) + image_to_use = self.param_image + if not image_to_use and container and container.image: + image_to_use = container.image + if not image_to_use: + self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!') + if container.running: + self.container_stop(container.id) + self.container_remove(container.id) + if not self.check_mode: + self.wait_for_state( + container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout) + new_container = self.container_create(image_to_use) + if new_container: + container = new_container + 
container_created = True + + if container and container.exists: + container = self.update_limits(container, image) + container = self.update_networks(container, container_created) + + if state == 'started' and not container.running: + self.diff_tracker.add('running', parameter=True, active=was_running) + container = self.container_start(container.id) + elif state == 'started' and self.param_restart: + self.diff_tracker.add('running', parameter=True, active=was_running) + self.diff_tracker.add('restarted', parameter=True, active=False) + container = self.container_restart(container.id) + elif state == 'stopped' and container.running: + self.diff_tracker.add('running', parameter=False, active=was_running) + self.container_stop(container.id) + container = self._get_container(container.id) + + if state == 'started' and self.param_paused is not None and container.paused != self.param_paused: + self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused) + if not self.check_mode: + try: + if self.param_paused: + self.engine_driver.pause_container(self.client, container.id) + else: + self.engine_driver.unpause_container(self.client, container.id) + except Exception as exc: + self.fail("Error %s container %s: %s" % ( + "pausing" if self.param_paused else "unpausing", container.id, to_native(exc) + )) + container = self._get_container(container.id) + self.results['changed'] = True + self.results['actions'].append(dict(set_paused=self.param_paused)) + + self.facts = container.raw + + def absent(self): + container = self._get_container(self.param_name) + if container.exists: + if container.running: + self.diff_tracker.add('running', parameter=False, active=True) + self.container_stop(container.id) + self.diff_tracker.add('exists', parameter=False, active=True) + self.container_remove(container.id) + + def _output_logs(self, msg): + self.module.log(msg=msg) + + def _get_container(self, container): + ''' + Expects container ID or Name. 
Returns a container object + ''' + container = self.engine_driver.inspect_container_by_name(self.client, container) + return Container(container, self.engine_driver) + + def _get_image(self): + image_parameter = self.param_image + if not image_parameter: + self.log('No image specified') + return None + if is_image_name_id(image_parameter): + image = self.engine_driver.inspect_image_by_id(self.client, image_parameter) + else: + repository, tag = parse_repository_tag(image_parameter) + if not tag: + tag = "latest" + image = self.engine_driver.inspect_image_by_name(self.client, repository, tag) + if not image or self.param_pull: + if not self.check_mode: + self.log("Pull the image.") + image, alreadyToLatest = self.engine_driver.pull_image(self.client, repository, tag) + if alreadyToLatest: + self.results['changed'] = False + else: + self.results['changed'] = True + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + elif not image: + # If the image isn't there, claim we'll pull. + # (Implicitly: if the image is there, claim it already was latest.) 
+ self.results['changed'] = True + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + + self.log("image") + self.log(image, pretty_print=True) + return image + + def _image_is_different(self, image, container): + if image and image.get('Id'): + if container and container.image: + if image.get('Id') != container.image: + self.diff_tracker.add('image', parameter=image.get('Id'), active=container.image) + return True + return False + + def _compose_create_parameters(self, image): + params = {} + for options, values in self.parameters: + engine = options.get_engine(self.engine_driver.name) + if engine.can_set_value(self.engine_driver.get_api_version(self.client)): + engine.set_value(self.module, params, self.engine_driver.get_api_version(self.client), options.options, values) + params['Image'] = image + return params + + def _record_differences(self, differences, options, param_values, engine, container, image): + container_values = engine.get_value(self.module, container.raw, self.engine_driver.get_api_version(self.client), options.options) + expected_values = engine.get_expected_values( + self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, image, param_values.copy()) + for option in options.options: + if option.name in expected_values: + param_value = expected_values[option.name] + container_value = container_values.get(option.name) + match = compare_generic(param_value, container_value, option.comparison, option.comparison_type) + + if not match: + # No match. + if engine.ignore_mismatching_result(self.module, self.client, self.engine_driver.get_api_version(self.client), + option, image, container_value, param_value): + # Ignore the result + continue + + # Record the differences + p = param_value + c = container_value + if option.comparison_type == 'set': + # Since the order does not matter, sort so that the diff output is better. 
+ if p is not None: + p = sorted(p) + if c is not None: + c = sorted(c) + elif option.comparison_type == 'set(dict)': + # Since the order does not matter, sort so that the diff output is better. + if option.name == 'expected_mounts': + # For selected values, use one entry as key + def sort_key_fn(x): + return x['target'] + else: + # We sort the list of dictionaries by using the sorted items of a dict as its key. + def sort_key_fn(x): + return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items()) + if p is not None: + p = sorted(p, key=sort_key_fn) + if c is not None: + c = sorted(c, key=sort_key_fn) + differences.add(option.name, parameter=p, active=c) + + def has_different_configuration(self, container, image): + differences = DifferenceTracker() + update_differences = DifferenceTracker() + for options, param_values in self.parameters: + engine = options.get_engine(self.engine_driver.name) + if engine.can_update_value(self.engine_driver.get_api_version(self.client)): + self._record_differences(update_differences, options, param_values, engine, container, image) + else: + self._record_differences(differences, options, param_values, engine, container, image) + has_differences = not differences.empty + # Only consider differences of properties that can be updated when there are also other differences + if has_differences: + differences.merge(update_differences) + return has_differences, differences + + def has_different_resource_limits(self, container, image): + differences = DifferenceTracker() + for options, param_values in self.parameters: + engine = options.get_engine(self.engine_driver.name) + if not engine.can_update_value(self.engine_driver.get_api_version(self.client)): + continue + self._record_differences(differences, options, param_values, engine, container, image) + has_differences = not differences.empty + return has_differences, differences + + def _compose_update_parameters(self): + result = {} + for options, values in 
self.parameters: + engine = options.get_engine(self.engine_driver.name) + if not engine.can_update_value(self.engine_driver.get_api_version(self.client)): + continue + engine.update_value(self.module, result, self.engine_driver.get_api_version(self.client), options.options, values) + return result + + def update_limits(self, container, image): + limits_differ, different_limits = self.has_different_resource_limits(container, image) + if limits_differ: + self.log("limit differences:") + self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) + self.diff_tracker.merge(different_limits) + if limits_differ and not self.check_mode: + self.container_update(container.id, self._compose_update_parameters()) + return self._get_container(container.id) + return container + + def has_network_differences(self, container): + ''' + Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 + ''' + different = False + differences = [] + + if not self.module.params['networks']: + return different, differences + + if not container.container.get('NetworkSettings'): + self.fail("has_missing_networks: Error parsing container properties. 
NetworkSettings missing.") + + connected_networks = container.container['NetworkSettings']['Networks'] + for network in self.module.params['networks']: + network_info = connected_networks.get(network['name']) + if network_info is None: + different = True + differences.append(dict( + parameter=network, + container=None + )) + else: + diff = False + network_info_ipam = network_info.get('IPAMConfig') or {} + if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'): + diff = True + if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'): + diff = True + if network.get('aliases'): + if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'): + diff = True + if network.get('links'): + expected_links = [] + for link, alias in network['links']: + expected_links.append("%s:%s" % (link, alias)) + if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): + diff = True + if diff: + different = True + differences.append(dict( + parameter=network, + container=dict( + name=network['name'], + ipv4_address=network_info_ipam.get('IPv4Address'), + ipv6_address=network_info_ipam.get('IPv6Address'), + aliases=network_info.get('Aliases'), + links=network_info.get('Links') + ) + )) + return different, differences + + def has_extra_networks(self, container): + ''' + Check if the container is connected to non-requested networks + ''' + extra_networks = [] + extra = False + + if not container.container.get('NetworkSettings'): + self.fail("has_extra_networks: Error parsing container properties. 
NetworkSettings missing.") + + connected_networks = container.container['NetworkSettings'].get('Networks') + if connected_networks: + for network, network_config in connected_networks.items(): + keep = False + if self.module.params['networks']: + for expected_network in self.module.params['networks']: + if expected_network['name'] == network: + keep = True + if not keep: + extra = True + extra_networks.append(dict(name=network, id=network_config['NetworkID'])) + return extra, extra_networks + + def update_networks(self, container, container_created): + updated_container = container + if self.all_options['networks'].comparison != 'ignore' or container_created: + has_network_differences, network_differences = self.has_network_differences(container) + if has_network_differences: + if self.diff.get('differences'): + self.diff['differences'].append(dict(network_differences=network_differences)) + else: + self.diff['differences'] = [dict(network_differences=network_differences)] + for netdiff in network_differences: + self.diff_tracker.add( + 'network.{0}'.format(netdiff['parameter']['name']), + parameter=netdiff['parameter'], + active=netdiff['container'] + ) + self.results['changed'] = True + updated_container = self._add_networks(container, network_differences) + + if (self.all_options['networks'].comparison == 'strict' and self.module.params['networks'] is not None) or self.param_purge_networks: + has_extra_networks, extra_networks = self.has_extra_networks(container) + if has_extra_networks: + if self.diff.get('differences'): + self.diff['differences'].append(dict(purge_networks=extra_networks)) + else: + self.diff['differences'] = [dict(purge_networks=extra_networks)] + for extra_network in extra_networks: + self.diff_tracker.add( + 'network.{0}'.format(extra_network['name']), + active=extra_network + ) + self.results['changed'] = True + updated_container = self._purge_networks(container, extra_networks) + return updated_container + + def _add_networks(self, 
container, differences): + for diff in differences: + # remove the container from the network, if connected + if diff.get('container'): + self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) + if not self.check_mode: + try: + self.engine_driver.disconnect_container_from_network(self.client, container.id, diff['parameter']['id']) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], + to_native(exc))) + # connect to the network + self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter'])) + if not self.check_mode: + params = {key: value for key, value in diff['parameter'].items() if key not in ('id', 'name')} + try: + self.log("Connecting container to network %s" % diff['parameter']['id']) + self.log(params, pretty_print=True) + self.engine_driver.connect_container_to_network(self.client, container.id, diff['parameter']['id'], params) + except Exception as exc: + self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc))) + return self._get_container(container.id) + + def _purge_networks(self, container, networks): + for network in networks: + self.results['actions'].append(dict(removed_from_network=network['name'])) + if not self.check_mode: + try: + self.engine_driver.disconnect_container_from_network(self.client, container.id, network['name']) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (network['name'], + to_native(exc))) + return self._get_container(container.id) + + def container_create(self, image): + create_parameters = self._compose_create_parameters(image) + self.log("create container") + self.log("image: %s parameters:" % image) + self.log(create_parameters, pretty_print=True) + self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) + self.results['changed'] 
= True + new_container = None + if not self.check_mode: + try: + container_id = self.engine_driver.create_container(self.client, self.param_name, create_parameters) + except Exception as exc: + self.fail("Error creating container: %s" % to_native(exc)) + return self._get_container(container_id) + return new_container + + def container_start(self, container_id): + self.log("start container %s" % (container_id)) + self.results['actions'].append(dict(started=container_id)) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.start_container(self.client, container_id) + except Exception as exc: + self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) + + if self.module.params['detach'] is False: + status = self.engine_driver.wait_for_container(self.client, container_id) + self.client.fail_results['status'] = status + self.results['status'] = status + + if self.module.params['auto_remove']: + output = "Cannot retrieve result as auto_remove is enabled" + if self.param_output_logs: + self.module.warn('Cannot output_logs if auto_remove is enabled!') + else: + output, real_output = self.engine_driver.get_container_output(self.client, container_id) + if real_output and self.param_output_logs: + self._output_logs(msg=output) + + if self.param_cleanup: + self.container_remove(container_id, force=True) + insp = self._get_container(container_id) + if insp.raw: + insp.raw['Output'] = output + else: + insp.raw = dict(Output=output) + if status != 0: + # Set `failed` to True and return output as msg + self.results['failed'] = True + self.results['msg'] = output + return insp + return self._get_container(container_id) + + def container_remove(self, container_id, link=False, force=False): + volume_state = (not self.param_keep_volumes) + self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) + self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, 
link=link, force=force)) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.remove_container(self.client, container_id, remove_volumes=volume_state, link=link, force=force) + except Exception as exc: + self.client.fail("Error removing container %s: %s" % (container_id, to_native(exc))) + + def container_update(self, container_id, update_parameters): + if update_parameters: + self.log("update container %s" % (container_id)) + self.log(update_parameters, pretty_print=True) + self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.update_container(self.client, container_id, update_parameters) + except Exception as exc: + self.fail("Error updating container %s: %s" % (container_id, to_native(exc))) + return self._get_container(container_id) + + def container_kill(self, container_id): + self.results['actions'].append(dict(killed=container_id, signal=self.param_kill_signal)) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.kill_container(self.client, container_id, kill_signal=self.param_kill_signal) + except Exception as exc: + self.fail("Error killing container %s: %s" % (container_id, to_native(exc))) + + def container_restart(self, container_id): + self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout'])) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.restart_container(self.client, container_id, self.module.params['stop_timeout'] or 10) + except Exception as exc: + self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) + return self._get_container(container_id) + + def container_stop(self, container_id): + if self.param_force_kill: + self.container_kill(container_id) + return + self.results['actions'].append(dict(stopped=container_id, 
timeout=self.module.params['stop_timeout'])) + self.results['changed'] = True + if not self.check_mode: + try: + self.engine_driver.stop_container(self.client, container_id, self.module.params['stop_timeout']) + except Exception as exc: + self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) + + +def run_module(engine_driver): + module, active_options, client = engine_driver.setup( + argument_spec=dict( + cleanup=dict(type='bool', default=False), + comparisons=dict(type='dict'), + container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), + command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'), + default_host_ip=dict(type='str'), + force_kill=dict(type='bool', default=False, aliases=['forcekill']), + ignore_image=dict(type='bool', default=False), + image=dict(type='str'), + image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), + keep_volumes=dict(type='bool', default=True), + kill_signal=dict(type='str'), + name=dict(type='str', required=True), + networks_cli_compatible=dict(type='bool', default=True), + output_logs=dict(type='bool', default=False), + paused=dict(type='bool'), + pull=dict(type='bool', default=False), + purge_networks=dict(type='bool', default=False), + recreate=dict(type='bool', default=False), + removal_wait_timeout=dict(type='float'), + restart=dict(type='bool', default=False), + state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), + ), + required_if=[ + ('state', 'present', ['image']) + ], + ) + + def execute(): + cm = ContainerManager(module, engine_driver, client, active_options) + cm.run() + module.exit_json(**sanitize_result(cm.results)) + + engine_driver.run(execute, client) diff --git a/plugins/module_utils/util.py b/plugins/module_utils/util.py index b09017da7..6d1a14bd7 100644 --- a/plugins/module_utils/util.py +++ b/plugins/module_utils/util.py @@ 
-331,52 +331,58 @@ def convert_duration_to_nanosecond(time_str): return time_in_nanoseconds -def parse_healthcheck(healthcheck): +def normalize_healthcheck_test(test): + if isinstance(test, (tuple, list)): + return [str(e) for e in test] + return ['CMD-SHELL', str(test)] + + +def normalize_healthcheck(healthcheck, normalize_test=False): """ - Return dictionary of healthcheck parameters and boolean if - healthcheck defined in image was requested to be disabled. + Return dictionary of healthcheck parameters. """ - if (not healthcheck) or (not healthcheck.get('test')): - return None, None - result = dict() # All supported healthcheck parameters - options = dict( - test='test', - interval='interval', - timeout='timeout', - start_period='start_period', - retries='retries' - ) + options = ('test', 'interval', 'timeout', 'start_period', 'retries') - duration_options = ['interval', 'timeout', 'start_period'] + duration_options = ('interval', 'timeout', 'start_period') - for (key, value) in options.items(): - if value in healthcheck: - if healthcheck.get(value) is None: + for key in options: + if key in healthcheck: + value = healthcheck[key] + if value is None: # due to recursive argument_spec, all keys are always present # (but have default value None if not specified) continue - if value in duration_options: - time = convert_duration_to_nanosecond(healthcheck.get(value)) - if time: - result[key] = time - elif healthcheck.get(value): - result[key] = healthcheck.get(value) - if key == 'test': - if isinstance(result[key], (tuple, list)): - result[key] = [str(e) for e in result[key]] - else: - result[key] = ['CMD-SHELL', str(result[key])] - elif key == 'retries': - try: - result[key] = int(result[key]) - except ValueError: - raise ValueError( - 'Cannot parse number of retries for healthcheck. 
' - 'Expected an integer, got "{0}".'.format(result[key]) - ) + if key in duration_options: + value = convert_duration_to_nanosecond(value) + if not value: + continue + if key == 'retries': + try: + value = int(value) + except ValueError: + raise ValueError( + 'Cannot parse number of retries for healthcheck. ' + 'Expected an integer, got "{0}".'.format(value) + ) + if key == 'test' and normalize_test: + value = normalize_healthcheck_test(value) + result[key] = value + + return result + + +def parse_healthcheck(healthcheck): + """ + Return dictionary of healthcheck parameters and boolean if + healthcheck defined in image was requested to be disabled. + """ + if (not healthcheck) or (not healthcheck.get('test')): + return None, None + + result = normalize_healthcheck(healthcheck, normalize_test=True) if result['test'] == ['NONE']: # If the user explicitly disables the healthcheck, return None diff --git a/plugins/modules/docker_container.py b/plugins/modules/docker_container.py index bf206e948..42f2d5b3d 100644 --- a/plugins/modules/docker_container.py +++ b/plugins/modules/docker_container.py @@ -11,10 +11,10 @@ --- module: docker_container -short_description: manage docker containers +short_description: manage Docker containers description: - - Manage the life cycle of docker containers. + - Manage the life cycle of Docker containers. - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken. @@ -670,8 +670,6 @@ pid_mode: description: - Set the PID namespace mode for the container. - - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the - Docker SDK for Python (docker) allow all values supported by the Docker daemon. 
type: str pids_limit: description: @@ -898,7 +896,6 @@ - "Felix Fontein (@felixfontein)" requirements: - - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0" - "Docker API >= 1.25" ''' @@ -1202,2429 +1199,18 @@ sample: 0 ''' -import os -import re -import shlex -import traceback -from time import sleep - -from ansible.module_utils.common.text.formatters import human_to_bytes -from ansible.module_utils.six import string_types -from ansible.module_utils.common.text.converters import to_native, to_text - -from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion - -from ansible_collections.community.docker.plugins.module_utils.common import ( - AnsibleDockerClient, - RequestException, -) -from ansible_collections.community.docker.plugins.module_utils.util import ( - DifferenceTracker, - DockerBaseClass, - compare_generic, - is_image_name_id, - sanitize_result, - clean_dict_booleans_for_docker_api, - omit_none_from_dict, - parse_healthcheck, - DOCKER_COMMON_ARGS, +from ansible_collections.community.docker.plugins.module_utils.module_container.docker_api import ( + DockerAPIEngineDriver, ) -try: - from docker import utils - from ansible_collections.community.docker.plugins.module_utils.common import docker_version - if LooseVersion(docker_version) >= LooseVersion('1.10.0'): - from docker.types import Ulimit, LogConfig - from docker import types as docker_types - else: - from docker.utils.types import Ulimit, LogConfig - from docker.errors import DockerException, APIError, NotFound -except Exception: - # missing Docker SDK for Python handled in ansible.module_utils.docker.common - pass - - -REQUIRES_CONVERSION_TO_BYTES = [ - 'kernel_memory', - 'memory', - 'memory_reservation', - 'memory_swap', - 'shm_size' -] - - -def is_volume_permissions(mode): - for part in mode.split(','): - if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 
'slave', 'nocopy'): - return False - return True - - -def parse_port_range(range_or_port, client): - ''' - Parses a string containing either a single port or a range of ports. - - Returns a list of integers for each port in the list. - ''' - if '-' in range_or_port: - try: - start, end = [int(port) for port in range_or_port.split('-')] - except Exception: - client.fail('Invalid port range: "{0}"'.format(range_or_port)) - if end < start: - client.fail('Invalid port range: "{0}"'.format(range_or_port)) - return list(range(start, end + 1)) - else: - try: - return [int(range_or_port)] - except Exception: - client.fail('Invalid port: "{0}"'.format(range_or_port)) - - -def split_colon_ipv6(text, client): - ''' - Split string by ':', while keeping IPv6 addresses in square brackets in one component. - ''' - if '[' not in text: - return text.split(':') - start = 0 - result = [] - while start < len(text): - i = text.find('[', start) - if i < 0: - result.extend(text[start:].split(':')) - break - j = text.find(']', i) - if j < 0: - client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1)) - result.extend(text[start:i].split(':')) - k = text.find(':', j) - if k < 0: - result[-1] += text[i:] - start = len(text) - else: - result[-1] += text[i:k] - if k == len(text): - result.append('') - break - start = k + 1 - return result - - -class TaskParameters(DockerBaseClass): - ''' - Access and parse module parameters - ''' - - def __init__(self, client): - super(TaskParameters, self).__init__() - self.client = client - - self.auto_remove = None - self.blkio_weight = None - self.capabilities = None - self.cap_drop = None - self.cleanup = None - self.command = None - self.cpu_period = None - self.cpu_quota = None - self.cpus = None - self.cpuset_cpus = None - self.cpuset_mems = None - self.cpu_shares = None - self.debug = None - self.default_host_ip = None - self.detach = None - self.devices = None - self.device_read_bps = None - 
self.device_write_bps = None - self.device_read_iops = None - self.device_write_iops = None - self.device_requests = None - self.dns_servers = None - self.dns_opts = None - self.dns_search_domains = None - self.domainname = None - self.env = None - self.env_file = None - self.entrypoint = None - self.etc_hosts = None - self.exposed_ports = None - self.force_kill = None - self.groups = None - self.healthcheck = None - self.hostname = None - self.ignore_image = None - self.image = None - self.init = None - self.interactive = None - self.ipc_mode = None - self.keep_volumes = None - self.kernel_memory = None - self.kill_signal = None - self.labels = None - self.links = None - self.log_driver = None - self.output_logs = None - self.log_options = None - self.mac_address = None - self.memory = None - self.memory_reservation = None - self.memory_swap = None - self.memory_swappiness = None - self.mounts = None - self.name = None - self.network_mode = None - self.userns_mode = None - self.networks = None - self.networks_cli_compatible = None - self.oom_killer = None - self.oom_score_adj = None - self.paused = None - self.pid_mode = None - self.pids_limit = None - self.privileged = None - self.purge_networks = None - self.pull = None - self.read_only = None - self.recreate = None - self.removal_wait_timeout = None - self.restart = None - self.restart_retries = None - self.restart_policy = None - self.runtime = None - self.shm_size = None - self.security_opts = None - self.state = None - self.stop_signal = None - self.stop_timeout = None - self.storage_opts = None - self.tmpfs = None - self.tty = None - self.user = None - self.uts = None - self.volumes = None - self.volume_binds = dict() - self.volumes_from = None - self.volume_driver = None - self.working_dir = None - - for key, value in client.module.params.items(): - setattr(self, key, value) - self.comparisons = client.comparisons - - # If state is 'absent', parameters do not have to be parsed or interpreted. 
- # Only the container's name is needed. - if self.state == 'absent': - return - - if self.default_host_ip: - valid_ip = False - if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.default_host_ip): - valid_ip = True - if re.match(r'^\[[0-9a-fA-F:]+\]$', self.default_host_ip): - valid_ip = True - if re.match(r'^[0-9a-fA-F:]+$', self.default_host_ip): - self.default_host_ip = '[{0}]'.format(self.default_host_ip) - valid_ip = True - if not valid_ip: - self.fail('The value of default_host_ip must be an empty string, an IPv4 address, ' - 'or an IPv6 address. Got "{0}" instead.'.format(self.default_host_ip)) - - if self.cpus is not None: - self.cpus = int(round(self.cpus * 1E9)) - - if self.groups: - # In case integers are passed as groups, we need to convert them to - # strings as docker internally treats them as strings. - self.groups = [to_text(g, errors='surrogate_or_strict') for g in self.groups] - - for param_name in REQUIRES_CONVERSION_TO_BYTES: - if client.module.params.get(param_name): - if param_name == 'memory_swap' and client.module.params.get(param_name) in ['unlimited', '-1']: - setattr(self, param_name, -1) - else: - try: - setattr(self, param_name, human_to_bytes(client.module.params.get(param_name))) - except ValueError as exc: - self.fail("Failed to convert %s to bytes: %s" % (param_name, to_native(exc))) - - self.published_ports = self._parse_publish_ports() - - self.ports = self._parse_exposed_ports(self.published_ports) - self.log("expose ports:") - self.log(self.ports, pretty_print=True) - - self.links = self._parse_links(self.links) - - if self.volumes: - self.volumes = self._expand_host_paths() - - self.tmpfs = self._parse_tmpfs() - self.env = self._get_environment() - self.ulimits = self._parse_ulimits() - self.sysctls = self._parse_sysctls() - self.log_config = self._parse_log_config() - try: - self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck) - except ValueError as e: - self.fail(to_native(e)) - - 
self.exp_links = None - self.volume_binds = self._get_volume_binds(self.volumes) - self.pid_mode = self._replace_container_names(self.pid_mode) - self.ipc_mode = self._replace_container_names(self.ipc_mode) - self.network_mode = self._replace_container_names(self.network_mode) - - self.log("volumes:") - self.log(self.volumes, pretty_print=True) - self.log("volume binds:") - self.log(self.volume_binds, pretty_print=True) - - if self.networks: - for network in self.networks: - network['id'] = self._get_network_id(network['name']) - if not network['id']: - self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name']) - if network.get('links'): - network['links'] = self._parse_links(network['links']) - - if self.mac_address: - # Ensure the MAC address uses colons instead of hyphens for later comparison - self.mac_address = self.mac_address.replace('-', ':') - - if client.module.params['command_handling'] == 'correct': - if self.entrypoint is not None: - self.entrypoint = [to_text(x, errors='surrogate_or_strict') for x in self.entrypoint] - - if self.command is not None: - if not isinstance(self.command, list): - # convert from str to list - self.command = shlex.split(to_text(self.command, errors='surrogate_or_strict')) - self.command = [to_text(x, errors='surrogate_or_strict') for x in self.command] - else: - if self.entrypoint: - # convert from list to str. 
- self.entrypoint = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in self.entrypoint])) - self.entrypoint = [to_text(x, errors='surrogate_or_strict') for x in self.entrypoint] - - if self.command: - # convert from list to str - if isinstance(self.command, list): - self.command = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.command]) - else: - self.command = to_text(self.command, errors='surrogate_or_strict') - self.command = [to_text(x, errors='surrogate_or_strict') for x in shlex.split(self.command)] - - self.mounts_opt, self.expected_mounts = self._process_mounts() - - self._check_mount_target_collisions() - - for param_name in ["device_read_bps", "device_write_bps"]: - if client.module.params.get(param_name): - self._process_rate_bps(option=param_name) - - for param_name in ["device_read_iops", "device_write_iops"]: - if client.module.params.get(param_name): - self._process_rate_iops(option=param_name) - - if self.device_requests: - for dr_index, dr in enumerate(self.device_requests): - # Make sure that capabilities are lists of lists of strings - if dr['capabilities']: - for or_index, or_list in enumerate(dr['capabilities']): - for and_index, and_term in enumerate(or_list): - if not isinstance(and_term, string_types): - self.fail( - "device_requests[{0}].capabilities[{1}][{2}] is not a string".format( - dr_index, or_index, and_index)) - or_list[and_index] = to_native(and_term) - # Make sure that options is a dictionary mapping strings to strings - if dr['options']: - dr['options'] = clean_dict_booleans_for_docker_api(dr['options']) - - def fail(self, msg): - self.client.fail(msg) - - @property - def update_parameters(self): - ''' - Returns parameters used to update a container - ''' - - update_parameters = dict( - blkio_weight='blkio_weight', - cpu_period='cpu_period', - cpu_quota='cpu_quota', - cpu_shares='cpu_shares', - cpuset_cpus='cpuset_cpus', - cpuset_mems='cpuset_mems', - mem_limit='memory', - 
mem_reservation='memory_reservation', - memswap_limit='memory_swap', - kernel_memory='kernel_memory', - restart_policy='restart_policy', - ) - - result = dict() - for key, value in update_parameters.items(): - if getattr(self, value, None) is not None: - if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']: - restart_policy = dict(Name=self.restart_policy, - MaximumRetryCount=self.restart_retries) - result[key] = restart_policy - elif self.client.option_minimal_versions[value]['supported']: - result[key] = getattr(self, value) - return result - - @property - def create_parameters(self): - ''' - Returns parameters used to create a container - ''' - create_params = dict( - command='command', - domainname='domainname', - hostname='hostname', - user='user', - detach='detach', - stdin_open='interactive', - tty='tty', - ports='ports', - environment='env', - name='name', - entrypoint='entrypoint', - mac_address='mac_address', - labels='labels', - stop_signal='stop_signal', - working_dir='working_dir', - stop_timeout='stop_timeout', - healthcheck='healthcheck', - ) - - if self.client.docker_py_version < LooseVersion('3.0'): - # cpu_shares and volume_driver moved to create_host_config in > 3 - create_params['cpu_shares'] = 'cpu_shares' - create_params['volume_driver'] = 'volume_driver' - - result = dict( - host_config=self._host_config(), - volumes=self._get_mounts(), - ) - - for key, value in create_params.items(): - if getattr(self, value, None) is not None: - if self.client.option_minimal_versions[value]['supported']: - result[key] = getattr(self, value) - - if self.disable_healthcheck: - # Make sure image's health check is overridden - result['healthcheck'] = {'test': ['NONE']} - - if self.networks_cli_compatible and self.networks: - network = self.networks[0] - params = dict() - for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'): - if network.get(para): - params[para] = network[para] - network_config = dict() - 
network_config[network['name']] = self.client.create_endpoint_config(**params) - result['networking_config'] = self.client.create_networking_config(network_config) - return result - - def _expand_host_paths(self): - new_vols = [] - for vol in self.volumes: - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - host, container, mode = parts - if not is_volume_permissions(mode): - self.fail('Found invalid volumes mode: {0}'.format(mode)) - if re.match(r'[.~]', host): - host = os.path.abspath(os.path.expanduser(host)) - new_vols.append("%s:%s:%s" % (host, container, mode)) - continue - elif len(parts) == 2: - if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]): - host = os.path.abspath(os.path.expanduser(parts[0])) - new_vols.append("%s:%s:rw" % (host, parts[1])) - continue - new_vols.append(vol) - return new_vols - - def _get_mounts(self): - ''' - Return a list of container mounts. - :return: - ''' - result = [] - if self.volumes: - for vol in self.volumes: - # Only pass anonymous volumes to create container - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - continue - if len(parts) == 2: - if not is_volume_permissions(parts[1]): - continue - result.append(vol) - self.log("mounts:") - self.log(result, pretty_print=True) - return result - - def _host_config(self): - ''' - Returns parameters used to create a HostConfig object - ''' - - host_config_params = dict( - port_bindings='published_ports', - publish_all_ports='publish_all_ports', - links='links', - privileged='privileged', - cgroup_parent='cgroup_parent', - dns='dns_servers', - dns_opt='dns_opts', - dns_search='dns_search_domains', - binds='volume_binds', - volumes_from='volumes_from', - network_mode='network_mode', - userns_mode='userns_mode', - cap_add='capabilities', - cap_drop='cap_drop', - extra_hosts='etc_hosts', - read_only='read_only', - ipc_mode='ipc_mode', - security_opt='security_opts', - ulimits='ulimits', - sysctls='sysctls', - log_config='log_config', - 
mem_limit='memory', - memswap_limit='memory_swap', - mem_swappiness='memory_swappiness', - oom_score_adj='oom_score_adj', - oom_kill_disable='oom_killer', - shm_size='shm_size', - group_add='groups', - devices='devices', - pid_mode='pid_mode', - tmpfs='tmpfs', - init='init', - uts_mode='uts', - runtime='runtime', - auto_remove='auto_remove', - device_read_bps='device_read_bps', - device_write_bps='device_write_bps', - device_read_iops='device_read_iops', - device_write_iops='device_write_iops', - pids_limit='pids_limit', - mounts='mounts', - nano_cpus='cpus', - storage_opt='storage_opts', - ) - - if self.client.docker_py_version >= LooseVersion('1.9'): - # blkio_weight can always be updated, but can only be set on creation - # when Docker SDK for Python and Docker API are new enough - host_config_params['blkio_weight'] = 'blkio_weight' - - if self.client.docker_py_version >= LooseVersion('3.0'): - # cpu_shares and volume_driver moved to create_host_config in > 3 - host_config_params['cpu_shares'] = 'cpu_shares' - host_config_params['volume_driver'] = 'volume_driver' - - params = dict() - for key, value in host_config_params.items(): - if getattr(self, value, None) is not None: - if self.client.option_minimal_versions[value]['supported']: - params[key] = getattr(self, value) - - if self.restart_policy: - params['restart_policy'] = dict(Name=self.restart_policy, - MaximumRetryCount=self.restart_retries) - - if 'mounts' in params: - params['mounts'] = self.mounts_opt - - if self.device_requests is not None: - params['device_requests'] = [dict((k, v) for k, v in dr.items() if v is not None) for dr in self.device_requests] - - return self.client.create_host_config(**params) - - def get_default_host_ip(self): - if self.default_host_ip is not None: - return self.default_host_ip - ip = '0.0.0.0' - if not self.networks: - return ip - for net in self.networks: - if net.get('name'): - try: - network = self.client.inspect_network(net['name']) - if network.get('Driver') == 
'bridge' and \ - network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'): - ip = network['Options']['com.docker.network.bridge.host_binding_ipv4'] - break - except NotFound as nfe: - self.client.fail( - "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], to_native(nfe)), - exception=traceback.format_exc() - ) - return ip - - def _parse_publish_ports(self): - ''' - Parse ports from docker CLI syntax - ''' - if self.published_ports is None: - return None - - if 'all' in self.published_ports: - self.client.module.fail_json( - msg='Specifying "all" in published_ports is no longer allowed. Set publish_all_ports to "true" instead ' - 'to randomly assign port mappings for those not specified by published_ports.') - - default_ip = self.get_default_host_ip() - - binds = {} - for port in self.published_ports: - parts = split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), self.client) - container_port = parts[-1] - protocol = '' - if '/' in container_port: - container_port, protocol = parts[-1].split('/') - container_ports = parse_port_range(container_port, self.client) - - p_len = len(parts) - if p_len == 1: - port_binds = len(container_ports) * [(default_ip,)] - elif p_len == 2: - if len(container_ports) == 1: - port_binds = [(default_ip, parts[0])] - else: - port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)] - elif p_len == 3: - # We only allow IPv4 and IPv6 addresses for the bind address - ipaddr = parts[0] - if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr): - self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. ' - 'Use the dig lookup to resolve hostnames. 
(Found hostname: {0})').format(ipaddr)) - if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr): - ipaddr = ipaddr[1:-1] - if parts[1]: - if len(container_ports) == 1: - port_binds = [(ipaddr, parts[1])] - else: - port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)] - else: - port_binds = len(container_ports) * [(ipaddr,)] - else: - self.fail(('Invalid port description "%s" - expected 1 to 3 colon-separated parts, but got %d. ' - 'Maybe you forgot to use square brackets ([...]) around an IPv6 address?') % (port, p_len)) - - for bind, container_port in zip(port_binds, container_ports): - idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port - if idx in binds: - old_bind = binds[idx] - if isinstance(old_bind, list): - old_bind.append(bind) - else: - binds[idx] = [old_bind, bind] - else: - binds[idx] = bind - return binds - - def _get_volume_binds(self, volumes): - ''' - Extract host bindings, if any, from list of volume mapping strings. - - :return: dictionary of bind mappings - ''' - result = dict() - if volumes: - for vol in volumes: - host = None - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - host, container, mode = parts - if not is_volume_permissions(mode): - self.fail('Found invalid volumes mode: {0}'.format(mode)) - elif len(parts) == 2: - if not is_volume_permissions(parts[1]): - host, container, mode = (parts + ['rw']) - if host is not None: - result[host] = dict( - bind=container, - mode=mode - ) - return result - - def _parse_exposed_ports(self, published_ports): - ''' - Parse exposed ports from docker CLI-style ports syntax. 
- ''' - exposed = [] - if self.exposed_ports: - for port in self.exposed_ports: - port = to_text(port, errors='surrogate_or_strict').strip() - protocol = 'tcp' - match = re.search(r'(/.+$)', port) - if match: - protocol = match.group(1).replace('/', '') - port = re.sub(r'/.+$', '', port) - exposed.append((port, protocol)) - if published_ports: - # Any published port should also be exposed - for publish_port in published_ports: - match = False - if isinstance(publish_port, string_types) and '/' in publish_port: - port, protocol = publish_port.split('/') - port = int(port) - else: - protocol = 'tcp' - port = int(publish_port) - for exposed_port in exposed: - if exposed_port[1] != protocol: - continue - if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]: - start_port, end_port = exposed_port[0].split('-') - if int(start_port) <= port <= int(end_port): - match = True - elif exposed_port[0] == port: - match = True - if not match: - exposed.append((port, protocol)) - return exposed - - @staticmethod - def _parse_links(links): - ''' - Turn links into a dictionary - ''' - if links is None: - return None - - result = [] - for link in links: - parsed_link = link.split(':', 1) - if len(parsed_link) == 2: - result.append((parsed_link[0], parsed_link[1])) - else: - result.append((parsed_link[0], parsed_link[0])) - return result - - def _parse_ulimits(self): - ''' - Turn ulimits into an array of Ulimit objects - ''' - if self.ulimits is None: - return None - - results = [] - for limit in self.ulimits: - limits = dict() - pieces = limit.split(':') - if len(pieces) >= 2: - limits['name'] = pieces[0] - limits['soft'] = int(pieces[1]) - limits['hard'] = int(pieces[1]) - if len(pieces) == 3: - limits['hard'] = int(pieces[2]) - try: - results.append(Ulimit(**limits)) - except ValueError as exc: - self.fail("Error parsing ulimits value %s - %s" % (limit, to_native(exc))) - return results - - def _parse_sysctls(self): - ''' - Turn sysctls into an hash of Sysctl 
objects - ''' - return self.sysctls - - def _parse_log_config(self): - ''' - Create a LogConfig object - ''' - if self.log_driver is None: - return None - - options = dict( - Type=self.log_driver, - Config=dict() - ) - - if self.log_options is not None: - options['Config'] = dict() - for k, v in self.log_options.items(): - if not isinstance(v, string_types): - self.client.module.warn( - "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. " - "If this is not correct, or you want to avoid such warnings, please quote the value." % ( - k, to_text(v, errors='surrogate_or_strict')) - ) - v = to_text(v, errors='surrogate_or_strict') - self.log_options[k] = v - options['Config'][k] = v - - try: - return LogConfig(**options) - except ValueError as exc: - self.fail('Error parsing logging options - %s' % (to_native(exc), )) - - def _parse_tmpfs(self): - ''' - Turn tmpfs into a hash of Tmpfs objects - ''' - result = dict() - if self.tmpfs is None: - return result - - for tmpfs_spec in self.tmpfs: - split_spec = tmpfs_spec.split(":", 1) - if len(split_spec) > 1: - result[split_spec[0]] = split_spec[1] - else: - result[split_spec[0]] = "" - return result - - def _get_environment(self): - """ - If environment file is combined with explicit environment variables, the explicit environment variables - take precedence. - """ - final_env = {} - if self.env_file: - parsed_env_file = utils.parse_env_file(self.env_file) - for name, value in parsed_env_file.items(): - final_env[name] = to_text(value, errors='surrogate_or_strict') - if self.env: - for name, value in self.env.items(): - if not isinstance(value, string_types): - self.fail("Non-string value found for env option. Ambiguous env options must be " - "wrapped in quotes to avoid them being interpreted. 
Key: %s" % (name, )) - final_env[name] = to_text(value, errors='surrogate_or_strict') - return final_env - - def _get_network_id(self, network_name): - network_id = None - try: - for network in self.client.networks(names=[network_name]): - if network['Name'] == network_name: - network_id = network['Id'] - break - except Exception as exc: - self.fail("Error getting network id for %s - %s" % (network_name, to_native(exc))) - return network_id - - def _process_mounts(self): - if self.mounts is None: - return None, None - mounts_list = [] - mounts_expected = [] - for mount in self.mounts: - target = mount['target'] - datatype = mount['type'] - mount_dict = dict(mount) - # Sanity checks (so we don't wait for Docker SDK for Python to barf on input) - if mount_dict.get('source') is None and datatype not in ('tmpfs', 'volume'): - self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype)) - mount_option_types = dict( - volume_driver='volume', - volume_options='volume', - propagation='bind', - no_copy='volume', - labels='volume', - tmpfs_size='tmpfs', - tmpfs_mode='tmpfs', - ) - for option, req_datatype in mount_option_types.items(): - if mount_dict.get(option) is not None and datatype != req_datatype: - self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype)) - # Handle volume_driver and volume_options - volume_driver = mount_dict.pop('volume_driver') - volume_options = mount_dict.pop('volume_options') - if volume_driver: - if volume_options: - volume_options = clean_dict_booleans_for_docker_api(volume_options) - mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options) - if mount_dict['labels']: - mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels']) - if mount_dict.get('tmpfs_size') is not None: - try: - mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size']) - except 
ValueError as exc: - self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, to_native(exc))) - if mount_dict.get('tmpfs_mode') is not None: - try: - mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8) - except Exception as dummy: - self.client.fail('tmp_fs mode of mount "{0}" is not an octal string!'.format(target)) - # Fill expected mount dict - mount_expected = dict(mount) - mount_expected['tmpfs_size'] = mount_dict['tmpfs_size'] - mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode'] - # Add result to lists - mounts_list.append(docker_types.Mount(**mount_dict)) - mounts_expected.append(omit_none_from_dict(mount_expected)) - return mounts_list, mounts_expected - - def _process_rate_bps(self, option): - """ - Format device_read_bps and device_write_bps option - """ - devices_list = [] - for v in getattr(self, option): - device_dict = dict((x.title(), y) for x, y in v.items()) - device_dict['Rate'] = human_to_bytes(device_dict['Rate']) - devices_list.append(device_dict) - - setattr(self, option, devices_list) - - def _process_rate_iops(self, option): - """ - Format device_read_iops and device_write_iops option - """ - devices_list = [] - for v in getattr(self, option): - device_dict = dict((x.title(), y) for x, y in v.items()) - devices_list.append(device_dict) - - setattr(self, option, devices_list) - - def _replace_container_names(self, mode): - """ - Parse IPC and PID modes. If they contain a container name, replace - with the container's ID. - """ - if mode is None or not mode.startswith('container:'): - return mode - container_name = mode[len('container:'):] - # Try to inspect container to see whether this is an ID or a - # name (and in the latter case, retrieve it's ID) - container = self.client.get_container(container_name) - if container is None: - # If we can't find the container, issue a warning and continue with - # what the user specified. 
- self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name)) - return mode - return 'container:{0}'.format(container['Id']) - - def _check_mount_target_collisions(self): - last = dict() - - def f(t, name): - if t in last: - if name == last[t]: - self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name)) - else: - self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t])) - last[t] = name - - if self.expected_mounts: - for t in [m['target'] for m in self.expected_mounts]: - f(t, 'mounts') - if self.volumes: - for v in self.volumes: - vs = v.split(':') - f(vs[0 if len(vs) == 1 else 1], 'volumes') - - -class Container(DockerBaseClass): - - def __init__(self, container, parameters): - super(Container, self).__init__() - self.raw = container - self.Id = None - self.container = container - if container: - self.Id = container['Id'] - self.Image = container['Image'] - self.log(self.container, pretty_print=True) - self.parameters = parameters - self.parameters.expected_links = None - self.parameters.expected_ports = None - self.parameters.expected_exposed = None - self.parameters.expected_volumes = None - self.parameters.expected_ulimits = None - self.parameters.expected_sysctls = None - self.parameters.expected_etc_hosts = None - self.parameters.expected_env = None - self.parameters.expected_device_requests = None - self.parameters_map = dict() - self.parameters_map['expected_labels'] = 'labels' - self.parameters_map['expected_links'] = 'links' - self.parameters_map['expected_ports'] = 'expected_ports' - self.parameters_map['expected_exposed'] = 'exposed_ports' - self.parameters_map['expected_volumes'] = 'volumes' - self.parameters_map['expected_ulimits'] = 'ulimits' - self.parameters_map['expected_sysctls'] = 'sysctls' - self.parameters_map['expected_etc_hosts'] = 'etc_hosts' - self.parameters_map['expected_env'] = 'env' - 
self.parameters_map['expected_entrypoint'] = 'entrypoint' - self.parameters_map['expected_binds'] = 'volumes' - self.parameters_map['expected_labels'] = 'labels' - self.parameters_map['expected_cmd'] = 'command' - self.parameters_map['expected_devices'] = 'devices' - self.parameters_map['expected_healthcheck'] = 'healthcheck' - self.parameters_map['expected_mounts'] = 'mounts' - self.parameters_map['expected_device_requests'] = 'device_requests' - - def fail(self, msg): - self.parameters.client.fail(msg) - - @property - def exists(self): - return True if self.container else False - - @property - def removing(self): - if self.container and self.container.get('State'): - return self.container['State'].get('Status') == 'removing' - return False - - @property - def running(self): - if self.container and self.container.get('State'): - if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False): - return True - return False - - @property - def paused(self): - if self.container and self.container.get('State'): - return self.container['State'].get('Paused', False) - return False - - def _compare(self, a, b, compare): - ''' - Compare values a and b as described in compare. 
- ''' - return compare_generic(a, b, compare['comparison'], compare['type']) - - def _decode_mounts(self, mounts): - if not mounts: - return mounts - result = [] - empty_dict = dict() - for mount in mounts: - res = dict() - res['type'] = mount.get('Type') - res['source'] = mount.get('Source') - res['target'] = mount.get('Target') - res['read_only'] = mount.get('ReadOnly', False) # golang's omitempty for bool returns None for False - res['consistency'] = mount.get('Consistency') - res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation') - res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False) - res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict) - res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name') - res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict) - res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes') - res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode') - result.append(res) - return result - - def has_different_configuration(self, image): - ''' - Diff parameters vs existing container config. 
Returns tuple: (True | False, List of differences) - ''' - self.log('Starting has_different_configuration') - self.parameters.expected_entrypoint = self._get_expected_entrypoint() - self.parameters.expected_links = self._get_expected_links() - self.parameters.expected_ports = self._get_expected_ports() - self.parameters.expected_exposed = self._get_expected_exposed(image) - self.parameters.expected_volumes = self._get_expected_volumes(image) - self.parameters.expected_binds = self._get_expected_binds(image) - self.parameters.expected_labels = self._get_expected_labels(image) - self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits) - self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls) - self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts') - self.parameters.expected_env = self._get_expected_env(image) - self.parameters.expected_cmd = self._get_expected_cmd() - self.parameters.expected_devices = self._get_expected_devices() - self.parameters.expected_healthcheck = self._get_expected_healthcheck() - self.parameters.expected_device_requests = self._get_expected_device_requests() - - if not self.container.get('HostConfig'): - self.fail("has_config_diff: Error parsing container properties. HostConfig missing.") - if not self.container.get('Config'): - self.fail("has_config_diff: Error parsing container properties. Config missing.") - if not self.container.get('NetworkSettings'): - self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.") - - host_config = self.container['HostConfig'] - log_config = host_config.get('LogConfig', dict()) - config = self.container['Config'] - network = self.container['NetworkSettings'] - - # The previous version of the docker module ignored the detach state by - # assuming if the container was running, it must have been detached. 
- detach = not (config.get('AttachStderr') and config.get('AttachStdout')) - - # "ExposedPorts": null returns None type & causes AttributeError - PR #5517 - if config.get('ExposedPorts') is not None: - expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()] - else: - expected_exposed = [] - - # Map parameters to container inspect results - config_mapping = dict( - expected_cmd=config.get('Cmd'), - domainname=config.get('Domainname'), - hostname=config.get('Hostname'), - user=config.get('User'), - detach=detach, - init=host_config.get('Init'), - interactive=config.get('OpenStdin'), - capabilities=host_config.get('CapAdd'), - cap_drop=host_config.get('CapDrop'), - cgroup_parent=host_config.get('CgroupParent'), - expected_devices=host_config.get('Devices'), - dns_servers=host_config.get('Dns'), - dns_opts=host_config.get('DnsOptions'), - dns_search_domains=host_config.get('DnsSearch'), - expected_env=(config.get('Env') or []), - expected_entrypoint=config.get('Entrypoint'), - expected_etc_hosts=host_config['ExtraHosts'], - expected_exposed=expected_exposed, - groups=host_config.get('GroupAdd'), - ipc_mode=host_config.get("IpcMode"), - expected_labels=config.get('Labels'), - expected_links=host_config.get('Links'), - mac_address=config.get('MacAddress', network.get('MacAddress')), - memory_swappiness=host_config.get('MemorySwappiness'), - network_mode=host_config.get('NetworkMode'), - userns_mode=host_config.get('UsernsMode'), - oom_killer=host_config.get('OomKillDisable'), - oom_score_adj=host_config.get('OomScoreAdj'), - pid_mode=host_config.get('PidMode'), - privileged=host_config.get('Privileged'), - expected_ports=host_config.get('PortBindings'), - read_only=host_config.get('ReadonlyRootfs'), - runtime=host_config.get('Runtime'), - shm_size=host_config.get('ShmSize'), - security_opts=host_config.get("SecurityOpt"), - stop_signal=config.get("StopSignal"), - tmpfs=host_config.get('Tmpfs'), - tty=config.get('Tty'), - 
expected_ulimits=host_config.get('Ulimits'), - expected_sysctls=host_config.get('Sysctls'), - uts=host_config.get('UTSMode'), - expected_volumes=config.get('Volumes'), - expected_binds=host_config.get('Binds'), - volume_driver=host_config.get('VolumeDriver'), - volumes_from=host_config.get('VolumesFrom'), - working_dir=config.get('WorkingDir'), - publish_all_ports=host_config.get('PublishAllPorts'), - expected_healthcheck=config.get('Healthcheck'), - disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']), - device_read_bps=host_config.get('BlkioDeviceReadBps'), - device_write_bps=host_config.get('BlkioDeviceWriteBps'), - device_read_iops=host_config.get('BlkioDeviceReadIOps'), - device_write_iops=host_config.get('BlkioDeviceWriteIOps'), - expected_device_requests=host_config.get('DeviceRequests'), - pids_limit=host_config.get('PidsLimit'), - storage_opts=host_config.get('StorageOpt'), - # According to https://github.com/moby/moby/, support for HostConfig.Mounts - # has been included at least since v17.03.0-ce, which has API version 1.26. - # The previous tag, v1.9.1, has API version 1.21 and does not have - # HostConfig.Mounts. I have no idea what about API 1.25... 
- expected_mounts=self._decode_mounts(host_config.get('Mounts')), - cpus=host_config.get('NanoCpus'), - ) - # Options which don't make sense without their accompanying option - if self.parameters.log_driver: - config_mapping['log_driver'] = log_config.get('Type') - config_mapping['log_options'] = log_config.get('Config') - - if self.parameters.client.option_minimal_versions['auto_remove']['supported']: - # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately - # it has a default value, that's why we have to jump through the hoops here - config_mapping['auto_remove'] = host_config.get('AutoRemove') - - if self.parameters.client.option_minimal_versions['stop_timeout']['supported']: - # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that - # stop_timeout has a hybrid role, in that it used to be something only used - # for stopping containers, and is now also used as a container property. - # That's why it needs special handling here. - config_mapping['stop_timeout'] = config.get('StopTimeout') - - differences = DifferenceTracker() - for key, value in config_mapping.items(): - minimal_version = self.parameters.client.option_minimal_versions.get(key, {}) - if not minimal_version.get('supported', True): - continue - compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)] - self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), to_text(value, errors='surrogate_or_strict'), compare)) - if getattr(self.parameters, key, None) is not None: - match = self._compare(getattr(self.parameters, key), value, compare) - - if not match: - if key == 'expected_healthcheck' and config_mapping['disable_healthcheck'] and self.parameters.disable_healthcheck: - # If the healthcheck is disabled (both in parameters and for the current container), and the user - # requested strict comparison for healthcheck, the comparison will fail. 
That's why we ignore the - # expected_healthcheck comparison in this case. - continue - - if key == 'expected_labels' and compare['comparison'] == 'strict' and self.parameters.image_label_mismatch == 'fail': - # If there are labels from the base image that should be removed and - # base_image_mismatch is fail we want raise an error. - image_labels = self._get_image_labels(image) - would_remove_labels = [] - for label in image_labels: - if label not in self.parameters.labels: - # Format label for error message - would_remove_labels.append(label) - if would_remove_labels: - msg = ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore" - " this error. Labels: {0}") - self.fail(msg.format(', '.join(['"%s"' % label for label in would_remove_labels]))) - - # no match. record the differences - p = getattr(self.parameters, key) - c = value - if compare['type'] == 'set': - # Since the order does not matter, sort so that the diff output is better. - if p is not None: - p = sorted(p) - if c is not None: - c = sorted(c) - elif compare['type'] == 'set(dict)': - # Since the order does not matter, sort so that the diff output is better. - if key == 'expected_mounts': - # For selected values, use one entry as key - def sort_key_fn(x): - return x['target'] - else: - # We sort the list of dictionaries by using the sorted items of a dict as its key. - def sort_key_fn(x): - return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items()) - if p is not None: - p = sorted(p, key=sort_key_fn) - if c is not None: - c = sorted(c, key=sort_key_fn) - differences.add(key, parameter=p, active=c) - - has_differences = not differences.empty - return has_differences, differences - - def has_different_resource_limits(self): - ''' - Diff parameters and container resource limits - ''' - if not self.container.get('HostConfig'): - self.fail("limits_differ_from_container: Error parsing container properties. 
HostConfig missing.") - - host_config = self.container['HostConfig'] - - restart_policy = host_config.get('RestartPolicy') or dict() - - config_mapping = dict( - blkio_weight=host_config.get('BlkioWeight'), - cpu_period=host_config.get('CpuPeriod'), - cpu_quota=host_config.get('CpuQuota'), - cpu_shares=host_config.get('CpuShares'), - cpuset_cpus=host_config.get('CpusetCpus'), - cpuset_mems=host_config.get('CpusetMems'), - kernel_memory=host_config.get("KernelMemory"), - memory=host_config.get('Memory'), - memory_reservation=host_config.get('MemoryReservation'), - memory_swap=host_config.get('MemorySwap'), - restart_policy=restart_policy.get('Name') - ) - - # Options which don't make sense without their accompanying option - if self.parameters.restart_policy: - config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount') - - differences = DifferenceTracker() - for key, value in config_mapping.items(): - if getattr(self.parameters, key, None): - compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)] - match = self._compare(getattr(self.parameters, key), value, compare) - - if not match: - # no match. record the differences - differences.add(key, parameter=getattr(self.parameters, key), active=value) - different = not differences.empty - return different, differences - - def has_network_differences(self): - ''' - Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 - ''' - different = False - differences = [] - - if not self.parameters.networks: - return different, differences - - if not self.container.get('NetworkSettings'): - self.fail("has_missing_networks: Error parsing container properties. 
NetworkSettings missing.") - - connected_networks = self.container['NetworkSettings']['Networks'] - for network in self.parameters.networks: - network_info = connected_networks.get(network['name']) - if network_info is None: - different = True - differences.append(dict( - parameter=network, - container=None - )) - else: - diff = False - network_info_ipam = network_info.get('IPAMConfig') or {} - if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'): - diff = True - if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'): - diff = True - if network.get('aliases'): - if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'): - diff = True - if network.get('links'): - expected_links = [] - for link, alias in network['links']: - expected_links.append("%s:%s" % (link, alias)) - if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): - diff = True - if diff: - different = True - differences.append(dict( - parameter=network, - container=dict( - name=network['name'], - ipv4_address=network_info_ipam.get('IPv4Address'), - ipv6_address=network_info_ipam.get('IPv6Address'), - aliases=network_info.get('Aliases'), - links=network_info.get('Links') - ) - )) - return different, differences - - def has_extra_networks(self): - ''' - Check if the container is connected to non-requested networks - ''' - extra_networks = [] - extra = False - - if not self.container.get('NetworkSettings'): - self.fail("has_extra_networks: Error parsing container properties. 
NetworkSettings missing.") - - connected_networks = self.container['NetworkSettings'].get('Networks') - if connected_networks: - for network, network_config in connected_networks.items(): - keep = False - if self.parameters.networks: - for expected_network in self.parameters.networks: - if expected_network['name'] == network: - keep = True - if not keep: - extra = True - extra_networks.append(dict(name=network, id=network_config['NetworkID'])) - return extra, extra_networks - - def _get_expected_devices(self): - if not self.parameters.devices: - return None - expected_devices = [] - for device in self.parameters.devices: - parts = device.split(':') - if len(parts) == 1: - expected_devices.append( - dict( - CgroupPermissions='rwm', - PathInContainer=parts[0], - PathOnHost=parts[0] - )) - elif len(parts) == 2: - parts = device.split(':') - expected_devices.append( - dict( - CgroupPermissions='rwm', - PathInContainer=parts[1], - PathOnHost=parts[0] - ) - ) - else: - expected_devices.append( - dict( - CgroupPermissions=parts[2], - PathInContainer=parts[1], - PathOnHost=parts[0] - )) - return expected_devices - - def _get_expected_entrypoint(self): - if self.parameters.client.module.params['command_handling'] != 'correct' and not self.parameters.entrypoint: - return None - return self.parameters.entrypoint - - def _get_expected_ports(self): - if self.parameters.published_ports is None: - return None - expected_bound_ports = {} - for container_port, config in self.parameters.published_ports.items(): - if isinstance(container_port, int): - container_port = "%s/tcp" % container_port - if len(config) == 1: - if isinstance(config[0], int): - expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}] - else: - expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}] - elif isinstance(config[0], tuple): - expected_bound_ports[container_port] = [] - for host_ip, host_port in config: - 
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')}) - else: - expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}] - return expected_bound_ports - - def _get_expected_links(self): - if self.parameters.links is None: - return None - self.log('parameter links:') - self.log(self.parameters.links, pretty_print=True) - exp_links = [] - for link, alias in self.parameters.links: - exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias)) - return exp_links - - def _get_expected_binds(self, image): - self.log('_get_expected_binds') - image_vols = [] - if image: - image_vols = self._get_image_binds(image['Config'].get('Volumes')) - param_vols = [] - if self.parameters.volumes: - for vol in self.parameters.volumes: - host = None - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - host, container, mode = parts - if not is_volume_permissions(mode): - self.fail('Found invalid volumes mode: {0}'.format(mode)) - if len(parts) == 2: - if not is_volume_permissions(parts[1]): - host, container, mode = parts + ['rw'] - if host: - param_vols.append("%s:%s:%s" % (host, container, mode)) - result = list(set(image_vols + param_vols)) - self.log("expected_binds:") - self.log(result, pretty_print=True) - return result - - def _get_expected_labels(self, image): - if self.parameters.labels is None: - return None - if self.parameters.image_label_mismatch == 'ignore': - expected_labels = dict(self._get_image_labels(image)) - else: - expected_labels = {} - expected_labels.update(self.parameters.labels) - return expected_labels - - def _get_image_labels(self, image): - if not image: - return {} - - # Can't use get('Labels', {}) because 'Labels' may be present and be None - return image['Config'].get('Labels') or {} - - def _get_expected_device_requests(self): - if self.parameters.device_requests is None: - return None - 
device_requests = [] - for dr in self.parameters.device_requests: - device_requests.append({ - 'Driver': dr['driver'], - 'Count': dr['count'], - 'DeviceIDs': dr['device_ids'], - 'Capabilities': dr['capabilities'], - 'Options': dr['options'], - }) - return device_requests - - def _get_image_binds(self, volumes): - ''' - Convert array of binds to array of strings with format host_path:container_path:mode - - :param volumes: array of bind dicts - :return: array of strings - ''' - results = [] - if isinstance(volumes, dict): - results += self._get_bind_from_dict(volumes) - elif isinstance(volumes, list): - for vol in volumes: - results += self._get_bind_from_dict(vol) - return results - - @staticmethod - def _get_bind_from_dict(volume_dict): - results = [] - if volume_dict: - for host_path, config in volume_dict.items(): - if isinstance(config, dict) and config.get('bind'): - container_path = config.get('bind') - mode = config.get('mode', 'rw') - results.append("%s:%s:%s" % (host_path, container_path, mode)) - return results - - def _get_expected_volumes(self, image): - self.log('_get_expected_volumes') - expected_vols = dict() - if image and image['Config'].get('Volumes'): - expected_vols.update(image['Config'].get('Volumes')) - - if self.parameters.volumes: - for vol in self.parameters.volumes: - # We only expect anonymous volumes to show up in the list - if ':' in vol: - parts = vol.split(':') - if len(parts) == 3: - continue - if len(parts) == 2: - if not is_volume_permissions(parts[1]): - continue - expected_vols[vol] = dict() - - if not expected_vols: - expected_vols = None - self.log("expected_volumes:") - self.log(expected_vols, pretty_print=True) - return expected_vols - - def _get_expected_env(self, image): - self.log('_get_expected_env') - expected_env = dict() - if image and image['Config'].get('Env'): - for env_var in image['Config']['Env']: - parts = env_var.split('=', 1) - expected_env[parts[0]] = parts[1] - if self.parameters.env: - 
expected_env.update(self.parameters.env) - param_env = [] - for key, value in expected_env.items(): - param_env.append("%s=%s" % (key, value)) - return param_env - - def _get_expected_exposed(self, image): - self.log('_get_expected_exposed') - image_ports = [] - if image: - image_exposed_ports = image['Config'].get('ExposedPorts') or {} - image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()] - param_ports = [] - if self.parameters.ports: - param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in self.parameters.ports] - result = list(set(image_ports + param_ports)) - self.log(result, pretty_print=True) - return result - - def _get_expected_ulimits(self, config_ulimits): - self.log('_get_expected_ulimits') - if config_ulimits is None: - return None - results = [] - for limit in config_ulimits: - results.append(dict( - Name=limit.name, - Soft=limit.soft, - Hard=limit.hard - )) - return results - - def _get_expected_sysctls(self, config_sysctls): - self.log('_get_expected_sysctls') - if config_sysctls is None: - return None - result = dict() - for key, value in config_sysctls.items(): - result[key] = to_text(value, errors='surrogate_or_strict') - return result - - def _get_expected_cmd(self): - self.log('_get_expected_cmd') - if self.parameters.client.module.params['command_handling'] != 'correct' and not self.parameters.command: - return None - return self.parameters.command - - def _convert_simple_dict_to_list(self, param_name, join_with=':'): - if getattr(self.parameters, param_name, None) is None: - return None - results = [] - for key, value in getattr(self.parameters, param_name).items(): - results.append("%s%s%s" % (key, join_with, value)) - return results - - def _normalize_port(self, port): - if '/' not in port: - return port + '/tcp' - return port - - def _get_expected_healthcheck(self): - self.log('_get_expected_healthcheck') - expected_healthcheck = dict() - - if self.parameters.healthcheck: - 
expected_healthcheck.update([(k.title().replace("_", ""), v) - for k, v in self.parameters.healthcheck.items()]) - - return expected_healthcheck - - -class ContainerManager(DockerBaseClass): - ''' - Perform container management tasks - ''' - - def __init__(self, client): - - super(ContainerManager, self).__init__() - - if client.module.params.get('log_options') and not client.module.params.get('log_driver'): - client.module.warn('log_options is ignored when log_driver is not specified') - if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'): - client.module.warn('healthcheck is ignored when test is not specified') - if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'): - client.module.warn('restart_retries is ignored when restart_policy is not specified') - - self.client = client - self.parameters = TaskParameters(client) - self.check_mode = self.client.check_mode - self.results = {'changed': False, 'actions': []} - self.diff = {} - self.diff_tracker = DifferenceTracker() - self.facts = {} - - state = self.parameters.state - if state in ('stopped', 'started', 'present'): - self.present(state) - elif state == 'absent': - self.absent() - - if not self.check_mode and not self.parameters.debug: - self.results.pop('actions') - - if self.client.module._diff or self.parameters.debug: - self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after() - self.results['diff'] = self.diff - - if self.facts: - self.results['container'] = self.facts - - def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None): - delay = 1.0 - total_wait = 0 - while True: - # Inspect container - result = self.client.get_container_by_id(container_id) - if result is None: - if accept_removal: - return - msg = 'Encontered vanished container while waiting for container "{0}"' - self.fail(msg.format(container_id)) - # Check 
container state - state = result.get('State', {}).get('Status') - if complete_states is not None and state in complete_states: - return - if wait_states is not None and state not in wait_states: - msg = 'Encontered unexpected state "{1}" while waiting for container "{0}"' - self.fail(msg.format(container_id, state)) - # Wait - if max_wait is not None: - if total_wait > max_wait: - msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"' - self.fail(msg.format(container_id, max_wait)) - if total_wait + delay > max_wait: - delay = max_wait - total_wait - sleep(delay) - total_wait += delay - # Exponential backoff, but never wait longer than 10 seconds - # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations - # until the maximal 10 seconds delay is reached. By then, the - # code will have slept for ~1.5 minutes.) - delay = min(delay * 1.1, 10) - - def present(self, state): - container = self._get_container(self.parameters.name) - was_running = container.running - was_paused = container.paused - container_created = False - - # If the image parameter was passed then we need to deal with the image - # version comparison. Otherwise we handle this depending on whether - # the container already runs or not; in the former case, in case the - # container needs to be restarted, we use the existing container's - # image ID. 
- image = self._get_image() - self.log(image, pretty_print=True) - if not container.exists or container.removing: - # New container - if container.removing: - self.log('Found container in removal phase') - else: - self.log('No container found') - if not self.parameters.image: - self.fail('Cannot create container when image is not specified!') - self.diff_tracker.add('exists', parameter=True, active=False) - if container.removing and not self.check_mode: - # Wait for container to be removed before trying to create it - self.wait_for_state( - container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout) - new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) - if new_container: - container = new_container - container_created = True - else: - # Existing container - different, differences = container.has_different_configuration(image) - image_different = False - if self.parameters.comparisons['image']['comparison'] == 'strict': - image_different = self._image_is_different(image, container) - if image_different or different or self.parameters.recreate: - self.diff_tracker.merge(differences) - self.diff['differences'] = differences.get_legacy_docker_container_diffs() - if image_different: - self.diff['image_different'] = True - self.log("differences") - self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True) - image_to_use = self.parameters.image - if not image_to_use and container and container.Image: - image_to_use = container.Image - if not image_to_use: - self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!') - if container.running: - self.container_stop(container.Id) - self.container_remove(container.Id) - if not self.check_mode: - self.wait_for_state( - container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout) - new_container = 
self.container_create(image_to_use, self.parameters.create_parameters) - if new_container: - container = new_container - container_created = True - - if container and container.exists: - container = self.update_limits(container) - container = self.update_networks(container, container_created) - - if state == 'started' and not container.running: - self.diff_tracker.add('running', parameter=True, active=was_running) - container = self.container_start(container.Id) - elif state == 'started' and self.parameters.restart: - self.diff_tracker.add('running', parameter=True, active=was_running) - self.diff_tracker.add('restarted', parameter=True, active=False) - container = self.container_restart(container.Id) - elif state == 'stopped' and container.running: - self.diff_tracker.add('running', parameter=False, active=was_running) - self.container_stop(container.Id) - container = self._get_container(container.Id) - - if state == 'started' and self.parameters.paused is not None and container.paused != self.parameters.paused: - self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused) - if not self.check_mode: - try: - if self.parameters.paused: - self.client.pause(container=container.Id) - else: - self.client.unpause(container=container.Id) - except Exception as exc: - self.fail("Error %s container %s: %s" % ( - "pausing" if self.parameters.paused else "unpausing", container.Id, to_native(exc) - )) - container = self._get_container(container.Id) - self.results['changed'] = True - self.results['actions'].append(dict(set_paused=self.parameters.paused)) - - self.facts = container.raw - - def absent(self): - container = self._get_container(self.parameters.name) - if container.exists: - if container.running: - self.diff_tracker.add('running', parameter=False, active=True) - self.container_stop(container.Id) - self.diff_tracker.add('exists', parameter=False, active=True) - self.container_remove(container.Id) - - def fail(self, msg, **kwargs): - 
self.client.fail(msg, **kwargs) - - def _output_logs(self, msg): - self.client.module.log(msg=msg) - - def _get_container(self, container): - ''' - Expects container ID or Name. Returns a container object - ''' - return Container(self.client.get_container(container), self.parameters) - - def _get_image(self): - if not self.parameters.image: - self.log('No image specified') - return None - if is_image_name_id(self.parameters.image): - image = self.client.find_image_by_id(self.parameters.image) - else: - repository, tag = utils.parse_repository_tag(self.parameters.image) - if not tag: - tag = "latest" - image = self.client.find_image(repository, tag) - if not image or self.parameters.pull: - if not self.check_mode: - self.log("Pull the image.") - image, alreadyToLatest = self.client.pull_image(repository, tag) - if alreadyToLatest: - self.results['changed'] = False - else: - self.results['changed'] = True - self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) - elif not image: - # If the image isn't there, claim we'll pull. - # (Implicitly: if the image is there, claim it already was latest.) 
- self.results['changed'] = True - self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) - - self.log("image") - self.log(image, pretty_print=True) - return image - - def _image_is_different(self, image, container): - if image and image.get('Id'): - if container and container.Image: - if image.get('Id') != container.Image: - self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image) - return True - return False - - def update_limits(self, container): - limits_differ, different_limits = container.has_different_resource_limits() - if limits_differ: - self.log("limit differences:") - self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) - self.diff_tracker.merge(different_limits) - if limits_differ and not self.check_mode: - self.container_update(container.Id, self.parameters.update_parameters) - return self._get_container(container.Id) - return container - - def update_networks(self, container, container_created): - updated_container = container - if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created: - has_network_differences, network_differences = container.has_network_differences() - if has_network_differences: - if self.diff.get('differences'): - self.diff['differences'].append(dict(network_differences=network_differences)) - else: - self.diff['differences'] = [dict(network_differences=network_differences)] - for netdiff in network_differences: - self.diff_tracker.add( - 'network.{0}'.format(netdiff['parameter']['name']), - parameter=netdiff['parameter'], - active=netdiff['container'] - ) - self.results['changed'] = True - updated_container = self._add_networks(container, network_differences) - - if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks: - has_extra_networks, extra_networks = container.has_extra_networks() - if has_extra_networks: - if 
self.diff.get('differences'): - self.diff['differences'].append(dict(purge_networks=extra_networks)) - else: - self.diff['differences'] = [dict(purge_networks=extra_networks)] - for extra_network in extra_networks: - self.diff_tracker.add( - 'network.{0}'.format(extra_network['name']), - active=extra_network - ) - self.results['changed'] = True - updated_container = self._purge_networks(container, extra_networks) - return updated_container - - def _add_networks(self, container, differences): - for diff in differences: - # remove the container from the network, if connected - if diff.get('container'): - self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) - if not self.check_mode: - try: - self.client.disconnect_container_from_network(container.Id, diff['parameter']['id']) - except Exception as exc: - self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], - to_native(exc))) - # connect to the network - params = dict() - for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'): - if diff['parameter'].get(para): - params[para] = diff['parameter'][para] - self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params)) - if not self.check_mode: - try: - self.log("Connecting container to network %s" % diff['parameter']['id']) - self.log(params, pretty_print=True) - self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params) - except Exception as exc: - self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc))) - return self._get_container(container.Id) - - def _purge_networks(self, container, networks): - for network in networks: - self.results['actions'].append(dict(removed_from_network=network['name'])) - if not self.check_mode: - try: - self.client.disconnect_container_from_network(container.Id, network['name']) - except Exception as exc: - self.fail("Error disconnecting 
container from network %s - %s" % (network['name'], - to_native(exc))) - return self._get_container(container.Id) - - def container_create(self, image, create_parameters): - self.log("create container") - self.log("image: %s parameters:" % image) - self.log(create_parameters, pretty_print=True) - self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) - self.results['changed'] = True - new_container = None - if not self.check_mode: - try: - new_container = self.client.create_container(image, **create_parameters) - self.client.report_warnings(new_container) - except Exception as exc: - self.fail("Error creating container: %s" % to_native(exc)) - return self._get_container(new_container['Id']) - return new_container - - def container_start(self, container_id): - self.log("start container %s" % (container_id)) - self.results['actions'].append(dict(started=container_id)) - self.results['changed'] = True - if not self.check_mode: - try: - self.client.start(container=container_id) - except Exception as exc: - self.fail("Error starting container %s: %s" % (container_id, to_native(exc))) - - if self.parameters.detach is False: - if self.client.docker_py_version >= LooseVersion('3.0'): - status = self.client.wait(container_id)['StatusCode'] - else: - status = self.client.wait(container_id) - self.client.fail_results['status'] = status - self.results['status'] = status - - if self.parameters.auto_remove: - output = "Cannot retrieve result as auto_remove is enabled" - if self.parameters.output_logs: - self.client.module.warn('Cannot output_logs if auto_remove is enabled!') - else: - config = self.client.inspect_container(container_id) - logging_driver = config['HostConfig']['LogConfig']['Type'] - - if logging_driver in ('json-file', 'journald', 'local'): - output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False) - if self.parameters.output_logs: - self._output_logs(msg=output) - else: - 
output = "Result logged using `%s` driver" % logging_driver - - if self.parameters.cleanup: - self.container_remove(container_id, force=True) - insp = self._get_container(container_id) - if insp.raw: - insp.raw['Output'] = output - else: - insp.raw = dict(Output=output) - if status != 0: - # Set `failed` to True and return output as msg - self.results['failed'] = True - self.results['msg'] = output - return insp - return self._get_container(container_id) - - def container_remove(self, container_id, link=False, force=False): - volume_state = (not self.parameters.keep_volumes) - self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) - self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) - self.results['changed'] = True - response = None - if not self.check_mode: - count = 0 - while True: - try: - response = self.client.remove_container(container_id, v=volume_state, link=link, force=force) - except NotFound as dummy: - pass - except APIError as exc: - if 'Unpause the container before stopping or killing' in exc.explanation: - # New docker daemon versions do not allow containers to be removed - # if they are paused. Make sure we don't end up in an infinite loop. 
- if count == 3: - self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc))) - count += 1 - # Unpause - try: - self.client.unpause(container=container_id) - except Exception as exc2: - self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) - # Now try again - continue - if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation: - pass - else: - self.fail("Error removing container %s: %s" % (container_id, to_native(exc))) - except Exception as exc: - self.fail("Error removing container %s: %s" % (container_id, to_native(exc))) - # We only loop when explicitly requested by 'continue' - break - return response - - def container_update(self, container_id, update_parameters): - if update_parameters: - self.log("update container %s" % (container_id)) - self.log(update_parameters, pretty_print=True) - self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) - self.results['changed'] = True - if not self.check_mode and callable(getattr(self.client, 'update_container')): - try: - result = self.client.update_container(container_id, **update_parameters) - self.client.report_warnings(result) - except Exception as exc: - self.fail("Error updating container %s: %s" % (container_id, to_native(exc))) - return self._get_container(container_id) - - def container_kill(self, container_id): - self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal)) - self.results['changed'] = True - response = None - if not self.check_mode: - try: - if self.parameters.kill_signal: - response = self.client.kill(container_id, signal=self.parameters.kill_signal) - else: - response = self.client.kill(container_id) - except Exception as exc: - self.fail("Error killing container %s: %s" % (container_id, to_native(exc))) - return response - - def container_restart(self, container_id): - 
self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout)) - self.results['changed'] = True - if not self.check_mode: - try: - if self.parameters.stop_timeout: - dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout) - else: - dummy = self.client.restart(container_id) - except Exception as exc: - self.fail("Error restarting container %s: %s" % (container_id, to_native(exc))) - return self._get_container(container_id) - - def container_stop(self, container_id): - if self.parameters.force_kill: - self.container_kill(container_id) - return - self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout)) - self.results['changed'] = True - response = None - if not self.check_mode: - count = 0 - while True: - try: - if self.parameters.stop_timeout: - response = self.client.stop(container_id, timeout=self.parameters.stop_timeout) - else: - response = self.client.stop(container_id) - except APIError as exc: - if 'Unpause the container before stopping or killing' in exc.explanation: - # New docker daemon versions do not allow containers to be removed - # if they are paused. Make sure we don't end up in an infinite loop. 
- if count == 3: - self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc))) - count += 1 - # Unpause - try: - self.client.unpause(container=container_id) - except Exception as exc2: - self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2))) - # Now try again - continue - self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) - except Exception as exc: - self.fail("Error stopping container %s: %s" % (container_id, to_native(exc))) - # We only loop when explicitly requested by 'continue' - break - return response - - -def detect_ipvX_address_usage(client): - ''' - Helper function to detect whether any specified network uses ipv4_address or ipv6_address - ''' - for network in client.module.params.get("networks") or []: - if network.get('ipv4_address') is not None or network.get('ipv6_address') is not None: - return True - return False - - -class AnsibleDockerClientContainer(AnsibleDockerClient): - # A list of module options which are not docker container properties - __NON_CONTAINER_PROPERTY_OPTIONS = tuple([ - 'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks', - 'recreate', 'restart', 'state', 'networks', 'cleanup', 'kill_signal', - 'output_logs', 'paused', 'removal_wait_timeout', 'default_host_ip', 'command_handling', - ] + list(DOCKER_COMMON_ARGS.keys())) - - def _parse_comparisons(self): - comparisons = {} - comp_aliases = {} - # Put in defaults - explicit_types = dict( - command='list', - devices='set(dict)', - device_requests='set(dict)', - dns_search_domains='list', - dns_servers='list', - env='set', - entrypoint='list', - etc_hosts='set', - mounts='set(dict)', - networks='set(dict)', - ulimits='set(dict)', - device_read_bps='set(dict)', - device_write_bps='set(dict)', - device_read_iops='set(dict)', - device_write_iops='set(dict)', - ) - all_options = set() # this is for improving user feedback when a wrong 
option was specified for comparison - default_values = dict( - stop_timeout='ignore', - ) - for option, data in self.module.argument_spec.items(): - all_options.add(option) - for alias in data.get('aliases', []): - all_options.add(alias) - # Ignore options which aren't used as container properties - if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks': - continue - # Determine option type - if option in explicit_types: - datatype = explicit_types[option] - elif data['type'] == 'list': - datatype = 'set' - elif data['type'] == 'dict': - datatype = 'dict' - else: - datatype = 'value' - # Determine comparison type - if option in default_values: - comparison = default_values[option] - elif datatype in ('list', 'value'): - comparison = 'strict' - else: - comparison = 'allow_more_present' - comparisons[option] = dict(type=datatype, comparison=comparison, name=option) - # Keep track of aliases - comp_aliases[option] = option - for alias in data.get('aliases', []): - comp_aliases[alias] = option - # Process legacy ignore options - if self.module.params['ignore_image']: - comparisons['image']['comparison'] = 'ignore' - if self.module.params['purge_networks']: - comparisons['networks']['comparison'] = 'strict' - # Process options - if self.module.params.get('comparisons'): - # If '*' appears in comparisons, process it first - if '*' in self.module.params['comparisons']: - value = self.module.params['comparisons']['*'] - if value not in ('strict', 'ignore'): - self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!") - for option, v in comparisons.items(): - if option == 'networks': - # `networks` is special: only update if - # some value is actually specified - if self.module.params['networks'] is None: - continue - v['comparison'] = value - # Now process all other comparisons. 
- comp_aliases_used = {} - for key, value in self.module.params['comparisons'].items(): - if key == '*': - continue - # Find main key - key_main = comp_aliases.get(key) - if key_main is None: - if key_main in all_options: - self.fail("The module option '%s' cannot be specified in the comparisons dict, " - "since it does not correspond to container's state!" % key) - self.fail("Unknown module option '%s' in comparisons dict!" % key) - if key_main in comp_aliases_used: - self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main)) - comp_aliases_used[key_main] = key - # Check value and update accordingly - if value in ('strict', 'ignore'): - comparisons[key_main]['comparison'] = value - elif value == 'allow_more_present': - if comparisons[key_main]['type'] == 'value': - self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value)) - comparisons[key_main]['comparison'] = value - else: - self.fail("Unknown comparison mode '%s'!" 
% value) - # Add implicit options - comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports') - comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports') - comparisons['disable_healthcheck'] = dict(type='value', - comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict', - name='disable_healthcheck') - # Check legacy values - if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore': - self.module.warn('The ignore_image option has been overridden by the comparisons option!') - if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict': - self.module.warn('The purge_networks option has been overridden by the comparisons option!') - self.comparisons = comparisons - - def _get_additional_minimal_versions(self): - stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent' - stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1') - if stop_timeout_needed_for_update and not stop_timeout_supported: - # We warn (instead of fail) since in older versions, stop_timeout was not used - # to update the container's configuration, but only when stopping a container. - self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update " - "the container's stop_timeout configuration. " - "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." 
% (docker_version,)) - self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported - - def __init__(self, **kwargs): - option_minimal_versions = dict( - # internal options - log_config=dict(), - publish_all_ports=dict(), - ports=dict(), - volume_binds=dict(), - name=dict(), - # normal options - device_read_bps=dict(docker_py_version='1.9.0'), - device_read_iops=dict(docker_py_version='1.9.0'), - device_write_bps=dict(docker_py_version='1.9.0'), - device_write_iops=dict(docker_py_version='1.9.0'), - device_requests=dict(docker_py_version='4.3.0', docker_api_version='1.40'), - dns_opts=dict(docker_py_version='1.10.0'), - auto_remove=dict(docker_py_version='2.1.0'), - healthcheck=dict(docker_py_version='2.0.0'), - init=dict(docker_py_version='2.2.0'), - runtime=dict(docker_py_version='2.4.0'), - sysctls=dict(docker_py_version='1.10.0'), - userns_mode=dict(docker_py_version='1.10.0'), - uts=dict(docker_py_version='3.5.0'), - pids_limit=dict(docker_py_version='1.10.0'), - mounts=dict(docker_py_version='2.6.0'), - cpus=dict(docker_py_version='2.3.0'), - storage_opts=dict(docker_py_version='2.1.0'), - # specials - ipvX_address_supported=dict(docker_py_version='1.9.0', - detect_usage=detect_ipvX_address_usage, - usage_msg='ipv4_address or ipv6_address in networks'), - stop_timeout=dict(), # see _get_additional_minimal_versions() - ) - - super(AnsibleDockerClientContainer, self).__init__( - option_minimal_versions=option_minimal_versions, - option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS, - **kwargs - ) - - self._get_additional_minimal_versions() - self._parse_comparisons() - - if self.module.params['container_default_behavior'] == 'compatibility': - old_default_values = dict( - auto_remove=False, - detach=True, - init=False, - interactive=False, - memory="0", - paused=False, - privileged=False, - read_only=False, - tty=False, - ) - for param, value in old_default_values.items(): - if self.module.params[param] is None: - 
self.module.params[param] = value +from ansible_collections.community.docker.plugins.module_utils.module_container.module import ( + run_module, +) def main(): - argument_spec = dict( - auto_remove=dict(type='bool'), - blkio_weight=dict(type='int'), - capabilities=dict(type='list', elements='str'), - cap_drop=dict(type='list', elements='str'), - cgroup_parent=dict(type='str'), - cleanup=dict(type='bool', default=False), - command=dict(type='raw'), - comparisons=dict(type='dict'), - container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']), - command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'), - cpu_period=dict(type='int'), - cpu_quota=dict(type='int'), - cpus=dict(type='float'), - cpuset_cpus=dict(type='str'), - cpuset_mems=dict(type='str'), - cpu_shares=dict(type='int'), - default_host_ip=dict(type='str'), - detach=dict(type='bool'), - devices=dict(type='list', elements='str'), - device_read_bps=dict(type='list', elements='dict', options=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='str'), - )), - device_write_bps=dict(type='list', elements='dict', options=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='str'), - )), - device_read_iops=dict(type='list', elements='dict', options=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='int'), - )), - device_write_iops=dict(type='list', elements='dict', options=dict( - path=dict(required=True, type='str'), - rate=dict(required=True, type='int'), - )), - device_requests=dict(type='list', elements='dict', options=dict( - capabilities=dict(type='list', elements='list'), - count=dict(type='int'), - device_ids=dict(type='list', elements='str'), - driver=dict(type='str'), - options=dict(type='dict'), - )), - dns_servers=dict(type='list', elements='str'), - dns_opts=dict(type='list', elements='str'), - dns_search_domains=dict(type='list', 
elements='str'), - domainname=dict(type='str'), - entrypoint=dict(type='list', elements='str'), - env=dict(type='dict'), - env_file=dict(type='path'), - etc_hosts=dict(type='dict'), - exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']), - force_kill=dict(type='bool', default=False, aliases=['forcekill']), - groups=dict(type='list', elements='str'), - healthcheck=dict(type='dict', options=dict( - test=dict(type='raw'), - interval=dict(type='str'), - timeout=dict(type='str'), - start_period=dict(type='str'), - retries=dict(type='int'), - )), - hostname=dict(type='str'), - ignore_image=dict(type='bool', default=False), - image=dict(type='str'), - image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), - init=dict(type='bool'), - interactive=dict(type='bool'), - ipc_mode=dict(type='str'), - keep_volumes=dict(type='bool', default=True), - kernel_memory=dict(type='str'), - kill_signal=dict(type='str'), - labels=dict(type='dict'), - links=dict(type='list', elements='str'), - log_driver=dict(type='str'), - log_options=dict(type='dict', aliases=['log_opt']), - mac_address=dict(type='str'), - memory=dict(type='str'), - memory_reservation=dict(type='str'), - memory_swap=dict(type='str'), - memory_swappiness=dict(type='int'), - mounts=dict(type='list', elements='dict', options=dict( - target=dict(type='str', required=True), - source=dict(type='str'), - type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'), - read_only=dict(type='bool'), - consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']), - propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']), - no_copy=dict(type='bool'), - labels=dict(type='dict'), - volume_driver=dict(type='str'), - volume_options=dict(type='dict'), - tmpfs_size=dict(type='str'), - tmpfs_mode=dict(type='str'), - )), - name=dict(type='str', required=True), - network_mode=dict(type='str'), 
- networks=dict(type='list', elements='dict', options=dict( - name=dict(type='str', required=True), - ipv4_address=dict(type='str'), - ipv6_address=dict(type='str'), - aliases=dict(type='list', elements='str'), - links=dict(type='list', elements='str'), - )), - networks_cli_compatible=dict(type='bool', default=True), - oom_killer=dict(type='bool'), - oom_score_adj=dict(type='int'), - output_logs=dict(type='bool', default=False), - paused=dict(type='bool'), - pid_mode=dict(type='str'), - pids_limit=dict(type='int'), - privileged=dict(type='bool'), - publish_all_ports=dict(type='bool'), - published_ports=dict(type='list', elements='str', aliases=['ports']), - pull=dict(type='bool', default=False), - purge_networks=dict(type='bool', default=False), - read_only=dict(type='bool'), - recreate=dict(type='bool', default=False), - removal_wait_timeout=dict(type='float'), - restart=dict(type='bool', default=False), - restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']), - restart_retries=dict(type='int'), - runtime=dict(type='str'), - security_opts=dict(type='list', elements='str'), - shm_size=dict(type='str'), - state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']), - stop_signal=dict(type='str'), - stop_timeout=dict(type='int'), - storage_opts=dict(type='dict'), - sysctls=dict(type='dict'), - tmpfs=dict(type='list', elements='str'), - tty=dict(type='bool'), - ulimits=dict(type='list', elements='str'), - user=dict(type='str'), - userns_mode=dict(type='str'), - uts=dict(type='str'), - volume_driver=dict(type='str'), - volumes=dict(type='list', elements='str'), - volumes_from=dict(type='list', elements='str'), - working_dir=dict(type='str'), - ) - - required_if = [ - ('state', 'present', ['image']) - ] - - client = AnsibleDockerClientContainer( - argument_spec=argument_spec, - required_if=required_if, - supports_check_mode=True, - ) - if client.module.params['networks_cli_compatible'] is True and 
client.module.params['networks'] and client.module.params['network_mode'] is None: - # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode - # (assuming no explicit value is specified for network_mode) - client.module.params['network_mode'] = client.module.params['networks'][0]['name'] - - try: - cm = ContainerManager(client) - client.module.exit_json(**sanitize_result(cm.results)) - except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) - except RequestException as e: - client.fail( - 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)), - exception=traceback.format_exc()) + engine_driver = DockerAPIEngineDriver() + run_module(engine_driver) if __name__ == '__main__': diff --git a/tests/integration/targets/docker_container/tasks/main.yml b/tests/integration/targets/docker_container/tasks/main.yml index b07a41a62..4a1204f3f 100644 --- a/tests/integration/targets/docker_container/tasks/main.yml +++ b/tests/integration/targets/docker_container/tasks/main.yml @@ -53,10 +53,9 @@ state: absent force: yes with_items: "{{ dnetworks }}" - when: docker_py_version is version('1.10.0', '>=') diff: no - when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.25', '>=') + when: docker_api_version is version('1.25', '>=') - fail: msg="Too old docker / docker-py version to run all docker_container tests!" 
- when: not(docker_py_version is version('3.5.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) + when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6) diff --git a/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml b/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml index 2475f9a90..8cb08e3cf 100644 --- a/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml +++ b/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml @@ -33,7 +33,6 @@ type: bind read_only: no register: mounts_1 - ignore_errors: yes - name: mounts (idempotency) docker_container: @@ -50,7 +49,6 @@ target: /tmp type: bind register: mounts_2 - ignore_errors: yes - name: mounts (less mounts) docker_container: @@ -63,7 +61,6 @@ target: /tmp type: bind register: mounts_3 - ignore_errors: yes - name: mounts (more mounts) docker_container: @@ -81,7 +78,6 @@ read_only: yes force_kill: yes register: mounts_4 - ignore_errors: yes - name: mounts (different modes) docker_container: @@ -99,7 +95,6 @@ read_only: no force_kill: yes register: mounts_5 - ignore_errors: yes - name: mounts (endpoint collision) docker_container: @@ -161,13 +156,6 @@ - "'The mount point \"/x\" appears twice in the mounts option' == mounts_6.msg" - mounts_7 is changed - mounts_8 is not changed - when: docker_py_version is version('2.6.0', '>=') -- assert: - that: - - mounts_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in mounts_1.msg" - - "'Minimum version required is 2.6.0 ' in mounts_1.msg" - when: docker_py_version is version('2.6.0', '<') #################################################################### ## mounts + volumes ################################################ @@ -187,7 +175,6 @@ volumes: - /tmp:/tmp register: mounts_volumes_1 - ignore_errors: 
yes - name: mounts + volumes (idempotency) docker_container: @@ -203,7 +190,6 @@ volumes: - /tmp:/tmp register: mounts_volumes_2 - ignore_errors: yes - name: mounts + volumes (switching) docker_container: @@ -220,7 +206,6 @@ - /:/whatever:ro force_kill: yes register: mounts_volumes_3 - ignore_errors: yes - name: mounts + volumes (collision, should fail) docker_container: @@ -253,13 +238,6 @@ - mounts_volumes_3 is changed - mounts_volumes_4 is failed - "'The mount point \"/tmp\" appears both in the volumes and mounts option' in mounts_volumes_4.msg" - when: docker_py_version is version('2.6.0', '>=') -- assert: - that: - - mounts_volumes_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in mounts_1.msg" - - "'Minimum version required is 2.6.0 ' in mounts_1.msg" - when: docker_py_version is version('2.6.0', '<') #################################################################### ## volume_driver ################################################### diff --git a/tests/integration/targets/docker_container/tasks/tests/network.yml b/tests/integration/targets/docker_container/tasks/tests/network.yml index 028fca3a8..55555ad6b 100644 --- a/tests/integration/targets/docker_container/tasks/tests/network.yml +++ b/tests/integration/targets/docker_container/tasks/tests/network.yml @@ -20,7 +20,6 @@ - "{{ nname_2 }}" loop_control: loop_var: network_name - when: docker_py_version is version('1.10.0', '>=') - set_fact: subnet_ipv4_base: 10.{{ 16 + (240 | random) }}.{{ 16 + (240 | random) }} @@ -57,7 +56,6 @@ - subnet: "{{ subnet_ipv4 }}" - subnet: "{{ subnet_ipv6 }}" state: present - when: docker_py_version is version('1.10.0', '>=') #################################################################### ## network_mode #################################################### @@ -147,589 +145,577 @@ ## networks, purge_networks for networks_cli_compatible=no ######### #################################################################### -- block: - - name: 
networks_cli_compatible=no, networks w/o purge_networks - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_1 }}" - - name: "{{ nname_2 }}" - networks_cli_compatible: no - register: networks_1 - - - name: networks_cli_compatible=no, networks w/o purge_networks - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_1 }}" - - name: "{{ nname_2 }}" - networks_cli_compatible: no - register: networks_2 - - - name: networks_cli_compatible=no, networks, purge_networks - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - purge_networks: yes - networks: - - name: bridge - - name: "{{ nname_1 }}" - networks_cli_compatible: no - force_kill: yes - register: networks_3 - - - name: networks_cli_compatible=no, networks, purge_networks (idempotency) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - purge_networks: yes - networks: - - name: "{{ nname_1 }}" - - name: bridge - networks_cli_compatible: no - register: networks_4 - - - name: networks_cli_compatible=no, networks (less networks) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: bridge - networks_cli_compatible: no - register: networks_5 - - - name: networks_cli_compatible=no, networks, purge_networks (less networks) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - purge_networks: yes - networks: - - name: bridge - networks_cli_compatible: no - force_kill: yes - register: networks_6 - - - name: networks_cli_compatible=no, 
networks, purge_networks (more networks) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - purge_networks: yes - networks: - - name: bridge - - name: "{{ nname_2 }}" - networks_cli_compatible: no - force_kill: yes - register: networks_7 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - assert: - that: - # networks_1 has networks default, 'bridge', nname_1 - - networks_1 is changed - - networks_1.container.NetworkSettings.Networks | length == 3 - - nname_1 in networks_1.container.NetworkSettings.Networks - - nname_2 in networks_1.container.NetworkSettings.Networks - - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks" - # networks_2 has networks default, 'bridge', nname_1 - - networks_2 is not changed - - networks_2.container.NetworkSettings.Networks | length == 3 - - nname_1 in networks_2.container.NetworkSettings.Networks - - nname_2 in networks_1.container.NetworkSettings.Networks - - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks" - # networks_3 has networks 'bridge', nname_1 - - networks_3 is changed - - networks_3.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_3.container.NetworkSettings.Networks - - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks" - # networks_4 has networks 'bridge', nname_1 - - networks_4 is not changed - - networks_4.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_4.container.NetworkSettings.Networks - - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks" - # networks_5 has networks 'bridge', nname_1 - - networks_5 is not changed - - 
networks_5.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_5.container.NetworkSettings.Networks - - "'default' in networks_5.container.NetworkSettings.Networks or 'bridge' in networks_5.container.NetworkSettings.Networks" - # networks_6 has networks 'bridge' - - networks_6 is changed - - networks_6.container.NetworkSettings.Networks | length == 1 - - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks" - # networks_7 has networks 'bridge', nname_2 - - networks_7 is changed - - networks_7.container.NetworkSettings.Networks | length == 2 - - nname_2 in networks_7.container.NetworkSettings.Networks - - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks" - - when: docker_py_version is version('1.10.0', '>=') +- name: networks_cli_compatible=no, networks w/o purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + - name: "{{ nname_2 }}" + networks_cli_compatible: no + register: networks_1 + +- name: networks_cli_compatible=no, networks w/o purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + - name: "{{ nname_2 }}" + networks_cli_compatible: no + register: networks_2 + +- name: networks_cli_compatible=no, networks, purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: bridge + - name: "{{ nname_1 }}" + networks_cli_compatible: no + force_kill: yes + register: networks_3 + +- name: networks_cli_compatible=no, networks, purge_networks (idempotency) + docker_container: + image: "{{ 
docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: "{{ nname_1 }}" + - name: bridge + networks_cli_compatible: no + register: networks_4 + +- name: networks_cli_compatible=no, networks (less networks) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: bridge + networks_cli_compatible: no + register: networks_5 + +- name: networks_cli_compatible=no, networks, purge_networks (less networks) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: bridge + networks_cli_compatible: no + force_kill: yes + register: networks_6 + +- name: networks_cli_compatible=no, networks, purge_networks (more networks) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + purge_networks: yes + networks: + - name: bridge + - name: "{{ nname_2 }}" + networks_cli_compatible: no + force_kill: yes + register: networks_7 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + # networks_1 has networks default, 'bridge', nname_1 + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks | length == 3 + - nname_1 in networks_1.container.NetworkSettings.Networks + - nname_2 in networks_1.container.NetworkSettings.Networks + - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks" + # networks_2 has networks default, 'bridge', nname_1 + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks | length == 3 + - nname_1 in networks_2.container.NetworkSettings.Networks + - nname_2 in 
networks_1.container.NetworkSettings.Networks + - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks" + # networks_3 has networks 'bridge', nname_1 + - networks_3 is changed + - networks_3.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_3.container.NetworkSettings.Networks + - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks" + # networks_4 has networks 'bridge', nname_1 + - networks_4 is not changed + - networks_4.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_4.container.NetworkSettings.Networks + - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks" + # networks_5 has networks 'bridge', nname_1 + - networks_5 is not changed + - networks_5.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_5.container.NetworkSettings.Networks + - "'default' in networks_5.container.NetworkSettings.Networks or 'bridge' in networks_5.container.NetworkSettings.Networks" + # networks_6 has networks 'bridge' + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks" + # networks_7 has networks 'bridge', nname_2 + - networks_7 is changed + - networks_7.container.NetworkSettings.Networks | length == 2 + - nname_2 in networks_7.container.NetworkSettings.Networks + - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks" #################################################################### ## networks for networks_cli_compatible=yes ######################## #################################################################### -- block: - - name: networks_cli_compatible=yes, networks specified 
- docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_1 }}" - aliases: - - alias1 - - alias2 - - name: "{{ nname_2 }}" - networks_cli_compatible: yes - register: networks_1 - - - name: networks_cli_compatible=yes, networks specified - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_1 }}" - - name: "{{ nname_2 }}" - networks_cli_compatible: yes - register: networks_2 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - name: networks_cli_compatible=yes, empty networks list specified - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - register: networks_3 - - - name: networks_cli_compatible=yes, empty networks list specified - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - register: networks_4 - - - name: networks_cli_compatible=yes, empty networks list specified, purge_networks - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - purge_networks: yes - force_kill: yes - register: networks_5 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - name: networks_cli_compatible=yes, networks not specified - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks_cli_compatible: yes - force_kill: yes - register: networks_6 - - - name: 
networks_cli_compatible=yes, networks not specified - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks_cli_compatible: yes - register: networks_7 - - - name: networks_cli_compatible=yes, networks not specified, purge_networks - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks_cli_compatible: yes - purge_networks: yes - force_kill: yes - register: networks_8 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - debug: var=networks_3 - - - assert: - that: - # networks_1 has networks nname_1, nname_2 - - networks_1 is changed - - networks_1.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_1.container.NetworkSettings.Networks - - nname_2 in networks_1.container.NetworkSettings.Networks - # networks_2 has networks nname_1, nname_2 - - networks_2 is not changed - - networks_2.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_2.container.NetworkSettings.Networks - - nname_2 in networks_1.container.NetworkSettings.Networks - # networks_3 has networks 'bridge' - - networks_3 is changed - - networks_3.container.NetworkSettings.Networks | length == 1 - - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks" - # networks_4 has networks 'bridge' - - networks_4 is not changed - - networks_4.container.NetworkSettings.Networks | length == 1 - - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks" - # networks_5 has no networks - - networks_5 is changed - - networks_5.container.NetworkSettings.Networks | length == 0 - # networks_6 has networks 'bridge' - - networks_6 is changed - - networks_6.container.NetworkSettings.Networks | length == 1 - - 
"'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks" - # networks_7 has networks 'bridge' - - networks_7 is not changed - - networks_7.container.NetworkSettings.Networks | length == 1 - - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks" - # networks_8 has no networks - - networks_8 is changed - - networks_8.container.NetworkSettings.Networks | length == 0 - - when: docker_py_version is version('1.10.0', '>=') +- name: networks_cli_compatible=yes, networks specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + aliases: + - alias1 + - alias2 + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + register: networks_1 + +- name: networks_cli_compatible=yes, networks specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + register: networks_2 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- name: networks_cli_compatible=yes, empty networks list specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + register: networks_3 + +- name: networks_cli_compatible=yes, empty networks list specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + register: networks_4 + +- name: networks_cli_compatible=yes, empty networks list specified, purge_networks + docker_container: + image: "{{ 
docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + purge_networks: yes + force_kill: yes + register: networks_5 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- name: networks_cli_compatible=yes, networks not specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks_cli_compatible: yes + force_kill: yes + register: networks_6 + +- name: networks_cli_compatible=yes, networks not specified + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks_cli_compatible: yes + register: networks_7 + +- name: networks_cli_compatible=yes, networks not specified, purge_networks + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks_cli_compatible: yes + purge_networks: yes + force_kill: yes + register: networks_8 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- debug: var=networks_3 + +- assert: + that: + # networks_1 has networks nname_1, nname_2 + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_1.container.NetworkSettings.Networks + - nname_2 in networks_1.container.NetworkSettings.Networks + # networks_2 has networks nname_1, nname_2 + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_2.container.NetworkSettings.Networks + - nname_2 in networks_2.container.NetworkSettings.Networks + # networks_3 has networks 'bridge' + - networks_3 is changed + - networks_3.container.NetworkSettings.Networks | length == 1 + - "'default' in 
networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks" + # networks_4 has networks 'bridge' + - networks_4 is not changed + - networks_4.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks" + # networks_5 has no networks + - networks_5 is changed + - networks_5.container.NetworkSettings.Networks | length == 0 + # networks_6 has networks 'bridge' + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks" + # networks_7 has networks 'bridge' + - networks_7 is not changed + - networks_7.container.NetworkSettings.Networks | length == 1 + - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks" + # networks_8 has no networks + - networks_8 is changed + - networks_8.container.NetworkSettings.Networks | length == 0 #################################################################### ## networks with comparisons ####################################### #################################################################### -- block: - - name: create container with one network - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_1 }}" - networks_cli_compatible: yes - register: networks_1 - - - name: different networks, comparisons=ignore - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_2 }}" - networks_cli_compatible: yes - comparisons: - network_mode: ignore # otherwise we'd have to set network_mode to nname_1 - networks: ignore - register: networks_2 - 
- - name: less networks, comparisons=ignore - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - comparisons: - networks: ignore - register: networks_3 - - - name: less networks, comparisons=allow_more_present - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - comparisons: - networks: allow_more_present - register: networks_4 - - - name: different networks, comparisons=allow_more_present - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_2 }}" - networks_cli_compatible: yes - comparisons: - network_mode: ignore # otherwise we'd have to set network_mode to nname_1 - networks: allow_more_present - force_kill: yes - register: networks_5 - - - name: different networks, comparisons=strict - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_2 }}" - networks_cli_compatible: yes - comparisons: - networks: strict - force_kill: yes - register: networks_6 - - - name: less networks, comparisons=strict - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: [] - networks_cli_compatible: yes - comparisons: - networks: strict - force_kill: yes - register: networks_7 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - assert: - that: - # networks_1 has networks nname_1 - - networks_1 is changed - - networks_1.container.NetworkSettings.Networks | length == 1 - - nname_1 in networks_1.container.NetworkSettings.Networks - # networks_2 has 
networks nname_1 - - networks_2 is not changed - - networks_2.container.NetworkSettings.Networks | length == 1 - - nname_1 in networks_2.container.NetworkSettings.Networks - # networks_3 has networks nname_1 - - networks_3 is not changed - - networks_3.container.NetworkSettings.Networks | length == 1 - - nname_1 in networks_3.container.NetworkSettings.Networks - # networks_4 has networks nname_1 - - networks_4 is not changed - - networks_4.container.NetworkSettings.Networks | length == 1 - - nname_1 in networks_4.container.NetworkSettings.Networks - # networks_5 has networks nname_1, nname_2 - - networks_5 is changed - - networks_5.container.NetworkSettings.Networks | length == 2 - - nname_1 in networks_5.container.NetworkSettings.Networks - - nname_2 in networks_5.container.NetworkSettings.Networks - # networks_6 has networks nname_2 - - networks_6 is changed - - networks_6.container.NetworkSettings.Networks | length == 1 - - nname_2 in networks_6.container.NetworkSettings.Networks - # networks_7 has no networks - - networks_7 is changed - - networks_7.container.NetworkSettings.Networks | length == 0 - - when: docker_py_version is version('1.10.0', '>=') +- name: create container with one network + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_1 }}" + networks_cli_compatible: yes + register: networks_1 + +- name: different networks, comparisons=ignore + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + comparisons: + network_mode: ignore # otherwise we'd have to set network_mode to nname_1 + networks: ignore + register: networks_2 + +- name: less networks, comparisons=ignore + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" 
+ state: started + networks: [] + networks_cli_compatible: yes + comparisons: + networks: ignore + register: networks_3 + +- name: less networks, comparisons=allow_more_present + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + comparisons: + networks: allow_more_present + register: networks_4 + +- name: different networks, comparisons=allow_more_present + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + comparisons: + network_mode: ignore # otherwise we'd have to set network_mode to nname_1 + networks: allow_more_present + force_kill: yes + register: networks_5 + +- name: different networks, comparisons=strict + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_2 }}" + networks_cli_compatible: yes + comparisons: + networks: strict + force_kill: yes + register: networks_6 + +- name: less networks, comparisons=strict + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: [] + networks_cli_compatible: yes + comparisons: + networks: strict + force_kill: yes + register: networks_7 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + # networks_1 has networks nname_1 + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_1.container.NetworkSettings.Networks + # networks_2 has networks nname_1 + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_2.container.NetworkSettings.Networks 
+ # networks_3 has networks nname_1 + - networks_3 is not changed + - networks_3.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_3.container.NetworkSettings.Networks + # networks_4 has networks nname_1 + - networks_4 is not changed + - networks_4.container.NetworkSettings.Networks | length == 1 + - nname_1 in networks_4.container.NetworkSettings.Networks + # networks_5 has networks nname_1, nname_2 + - networks_5 is changed + - networks_5.container.NetworkSettings.Networks | length == 2 + - nname_1 in networks_5.container.NetworkSettings.Networks + - nname_2 in networks_5.container.NetworkSettings.Networks + # networks_6 has networks nname_2 + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks | length == 1 + - nname_2 in networks_6.container.NetworkSettings.Networks + # networks_7 has no networks + - networks_7 is changed + - networks_7.container.NetworkSettings.Networks | length == 0 #################################################################### ## networks with IP address ######################################## #################################################################### -- block: - - name: create container (stopped) with one network and fixed IP - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: stopped - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_2 }}" - ipv6_address: "{{ nname_3_ipv6_2 }}" - networks_cli_compatible: yes - register: networks_1 - - - name: create container (stopped) with one network and fixed IP (idempotent) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: stopped - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_2 }}" - ipv6_address: "{{ nname_3_ipv6_2 }}" - networks_cli_compatible: yes - register: networks_2 - - - name: create container (stopped) with one network and fixed IP 
(different IPv4) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: stopped - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_3 }}" - ipv6_address: "{{ nname_3_ipv6_2 }}" - networks_cli_compatible: yes - register: networks_3 - - - name: create container (stopped) with one network and fixed IP (different IPv6) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: stopped - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_3 }}" - ipv6_address: "{{ nname_3_ipv6_3 }}" - networks_cli_compatible: yes - register: networks_4 - - - name: create container (started) with one network and fixed IP - docker_container: - name: "{{ cname }}" - state: started - register: networks_5 - - - name: create container (started) with one network and fixed IP (different IPv4) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_4 }}" - ipv6_address: "{{ nname_3_ipv6_3 }}" - networks_cli_compatible: yes - force_kill: yes - register: networks_6 - - - name: create container (started) with one network and fixed IP (different IPv6) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_4 }}" - ipv6_address: "{{ nname_3_ipv6_4 }}" - networks_cli_compatible: yes - force_kill: yes - register: networks_7 - - - name: create container (started) with one network and fixed IP (idempotent) - docker_container: - image: "{{ docker_test_image_alpine }}" - command: '/bin/sh -c "sleep 10m"' - name: "{{ cname }}" - state: started - networks: - - name: "{{ nname_3 }}" - ipv4_address: "{{ nname_3_ipv4_4 }}" - 
ipv6_address: "{{ nname_3_ipv6_4 }}" - networks_cli_compatible: yes - register: networks_8 - - - name: cleanup - docker_container: - name: "{{ cname }}" - state: absent - force_kill: yes - diff: no - - - assert: - that: - - networks_1 is changed - - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2 - - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr - - networks_1.container.NetworkSettings.Networks[nname_3].IPAddress == "" - - networks_1.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" - - networks_2 is not changed - - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2 - - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr - - networks_2.container.NetworkSettings.Networks[nname_3].IPAddress == "" - - networks_2.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" - - networks_3 is changed - - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 - - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr - - networks_3.container.NetworkSettings.Networks[nname_3].IPAddress == "" - - networks_3.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" - - networks_4 is changed - - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 - - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr - - networks_4.container.NetworkSettings.Networks[nname_3].IPAddress == "" - - networks_4.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" - - networks_5 is changed - - 
networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 - - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr - - networks_5.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_3 - - networks_5.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr - - networks_6 is changed - - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 - - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr - - networks_6.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 - - networks_6.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr - - networks_7 is changed - - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 - - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr - - networks_7.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 - - networks_7.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr - - networks_8 is not changed - - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 - - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr - - networks_8.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 - - networks_8.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr - - when: docker_py_version is version('1.10.0', '>=') +- name: create container 
(stopped) with one network and fixed IP + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_2 }}" + ipv6_address: "{{ nname_3_ipv6_2 }}" + networks_cli_compatible: yes + register: networks_1 + +- name: create container (stopped) with one network and fixed IP (idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_2 }}" + ipv6_address: "{{ nname_3_ipv6_2 }}" + networks_cli_compatible: yes + register: networks_2 + +- name: create container (stopped) with one network and fixed IP (different IPv4) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_3 }}" + ipv6_address: "{{ nname_3_ipv6_2 }}" + networks_cli_compatible: yes + register: networks_3 + +- name: create container (stopped) with one network and fixed IP (different IPv6) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: stopped + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_3 }}" + ipv6_address: "{{ nname_3_ipv6_3 }}" + networks_cli_compatible: yes + register: networks_4 + +- name: create container (started) with one network and fixed IP + docker_container: + name: "{{ cname }}" + state: started + register: networks_5 + +- name: create container (started) with one network and fixed IP (different IPv4) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_4 }}" + ipv6_address: "{{ 
nname_3_ipv6_3 }}" + networks_cli_compatible: yes + force_kill: yes + register: networks_6 + +- name: create container (started) with one network and fixed IP (different IPv6) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_4 }}" + ipv6_address: "{{ nname_3_ipv6_4 }}" + networks_cli_compatible: yes + force_kill: yes + register: networks_7 + +- name: create container (started) with one network and fixed IP (idempotent) + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + networks: + - name: "{{ nname_3 }}" + ipv4_address: "{{ nname_3_ipv4_4 }}" + ipv6_address: "{{ nname_3_ipv6_4 }}" + networks_cli_compatible: yes + register: networks_8 + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- assert: + that: + - networks_1 is changed + - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2 + - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr + - networks_1.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_1.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_2 is not changed + - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2 + - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr + - networks_2.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_2.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_3 is changed + - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 + - 
networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr + - networks_3.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_3.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_4 is changed + - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 + - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_4.container.NetworkSettings.Networks[nname_3].IPAddress == "" + - networks_4.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == "" + - networks_5 is changed + - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3 + - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_5.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_3 + - networks_5.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_6 is changed + - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 + - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_6.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 + - networks_6.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr + - networks_7 is changed + - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 + - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr + - 
networks_7.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 + - networks_7.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr + - networks_8 is not changed + - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4 + - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr + - networks_8.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4 + - networks_8.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr #################################################################### #################################################################### @@ -746,4 +732,3 @@ - "{{ nname_3 }}" loop_control: loop_var: network_name - when: docker_py_version is version('1.10.0', '>=') diff --git a/tests/integration/targets/docker_container/tasks/tests/options.yml b/tests/integration/targets/docker_container/tasks/tests/options.yml index 4b898b7e8..4a0ccf775 100644 --- a/tests/integration/targets/docker_container/tasks/tests/options.yml +++ b/tests/integration/targets/docker_container/tasks/tests/options.yml @@ -21,7 +21,6 @@ state: started auto_remove: yes register: auto_remove_1 - ignore_errors: yes - name: Give container 1 second to be sure it terminated pause: @@ -32,19 +31,11 @@ name: "{{ cname }}" state: absent register: auto_remove_2 - ignore_errors: yes - assert: that: - auto_remove_1 is changed - auto_remove_2 is not changed - when: docker_py_version is version('2.1.0', '>=') -- assert: - that: - - auto_remove_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in auto_remove_1.msg" - - "'Minimum version required is 2.1.0 ' in auto_remove_1.msg" - when: docker_py_version is version('2.1.0', '<') #################################################################### ## blkio_weight 
#################################################### @@ -573,7 +564,6 @@ name: "{{ cname }}" cpus: 1 state: started - ignore_errors: yes register: cpus_1 - name: cpus (idempotency) @@ -583,7 +573,6 @@ name: "{{ cname }}" cpus: 1 state: started - ignore_errors: yes register: cpus_2 - name: cpus (change) @@ -596,7 +585,6 @@ force_kill: yes # This will fail if the system the test is run on doesn't have # multiple MEMs available. - ignore_errors: yes register: cpus_3 - name: cleanup @@ -611,13 +599,6 @@ - cpus_1 is changed - cpus_2 is not changed and cpus_2 is not failed - cpus_3 is failed or cpus_3 is changed - when: docker_py_version is version('2.3.0', '>=') -- assert: - that: - - cpus_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in cpus_1.msg" - - "'Minimum version required is 2.3.0 ' in cpus_1.msg" - when: docker_py_version is version('2.3.0', '<') #################################################################### ## debug ########################################################### @@ -741,11 +722,8 @@ - detach_cleanup_nonzero.status == 42 - "'Output' in detach_cleanup_nonzero.container" - "detach_cleanup_nonzero.container.Output == ''" -- assert: - that: - "'Cannot retrieve result as auto_remove is enabled' == detach_auto_remove.container.Output" - detach_auto_remove_cleanup is not changed - when: docker_py_version is version('2.1.0', '>=') #################################################################### ## devices ######################################################### @@ -825,7 +803,6 @@ - path: /dev/urandom rate: 10K register: device_read_bps_1 - ignore_errors: yes - name: device_read_bps (idempotency) docker_container: @@ -839,7 +816,6 @@ - path: /dev/random rate: 20M register: device_read_bps_2 - ignore_errors: yes - name: device_read_bps (lesser entries) docker_container: @@ -851,7 +827,6 @@ - path: /dev/random rate: 20M register: device_read_bps_3 - ignore_errors: yes - name: device_read_bps (changed) docker_container: @@ -866,7 
+841,6 @@ rate: 5K force_kill: yes register: device_read_bps_4 - ignore_errors: yes - name: cleanup docker_container: @@ -881,13 +855,6 @@ - device_read_bps_2 is not changed - device_read_bps_3 is not changed - device_read_bps_4 is changed - when: docker_py_version is version('1.9.0', '>=') -- assert: - that: - - device_read_bps_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in device_read_bps_1.msg" - - "'Minimum version required is 1.9.0 ' in device_read_bps_1.msg" - when: docker_py_version is version('1.9.0', '<') #################################################################### ## device_read_iops ################################################ @@ -905,7 +872,6 @@ - path: /dev/urandom rate: 20 register: device_read_iops_1 - ignore_errors: yes - name: device_read_iops (idempotency) docker_container: @@ -919,7 +885,6 @@ - path: /dev/random rate: 10 register: device_read_iops_2 - ignore_errors: yes - name: device_read_iops (less) docker_container: @@ -931,7 +896,6 @@ - path: /dev/random rate: 10 register: device_read_iops_3 - ignore_errors: yes - name: device_read_iops (changed) docker_container: @@ -946,7 +910,6 @@ rate: 50 force_kill: yes register: device_read_iops_4 - ignore_errors: yes - name: cleanup docker_container: @@ -961,13 +924,6 @@ - device_read_iops_2 is not changed - device_read_iops_3 is not changed - device_read_iops_4 is changed - when: docker_py_version is version('1.9.0', '>=') -- assert: - that: - - device_read_iops_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in device_read_iops_1.msg" - - "'Minimum version required is 1.9.0 ' in device_read_iops_1.msg" - when: docker_py_version is version('1.9.0', '<') #################################################################### ## device_write_bps and device_write_iops ########################## @@ -986,7 +942,6 @@ - path: /dev/urandom rate: 30 register: device_write_limit_1 - ignore_errors: yes - name: device_write_bps and device_write_iops (idempotency) 
docker_container: @@ -1001,7 +956,6 @@ - path: /dev/urandom rate: 30 register: device_write_limit_2 - ignore_errors: yes - name: device_write_bps device_write_iops (changed) docker_container: @@ -1017,7 +971,6 @@ rate: 100 force_kill: yes register: device_write_limit_3 - ignore_errors: yes - name: cleanup docker_container: @@ -1031,13 +984,6 @@ - device_write_limit_1 is changed - device_write_limit_2 is not changed - device_write_limit_3 is changed - when: docker_py_version is version('1.9.0', '>=') -- assert: - that: - - device_write_limit_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in device_write_limit_1.msg" - - "'Minimum version required is 1.9.0 ' in device_write_limit_1.msg" - when: docker_py_version is version('1.9.0', '<') #################################################################### ## device_requests ################################################# @@ -1074,14 +1020,13 @@ that: - device_requests_1 is changed - device_requests_2 is not changed - when: docker_py_version is version('4.3.0', '>=') and docker_api_version is version('1.40', '>=') + when: docker_api_version is version('1.40', '>=') - assert: that: - device_requests_1 is failed - | - (('version is ' ~ docker_py_version ~ ' ') in device_requests_1.msg and 'Minimum version required is 4.3.0 ' in device_requests_1.msg) or - (('API version is ' ~ docker_api_version ~ '.') in device_requests_1.msg and 'Minimum version required is 1.40 ' in device_requests_1.msg) - when: docker_py_version is version('4.3.0', '<') or docker_api_version is version('1.40', '<') + ('API version is ' ~ docker_api_version ~ '.') in device_requests_1.msg and 'Minimum version required is 1.40 ' in device_requests_1.msg + when: docker_api_version is version('1.40', '<') #################################################################### ## dns_opts ######################################################## @@ -1097,7 +1042,6 @@ - "timeout:10" - rotate register: dns_opts_1 - ignore_errors: yes - name: 
dns_opts (idempotency) docker_container: @@ -1109,7 +1053,6 @@ - rotate - "timeout:10" register: dns_opts_2 - ignore_errors: yes - name: dns_opts (less resolv.conf options) docker_container: @@ -1120,7 +1063,6 @@ dns_opts: - "timeout:10" register: dns_opts_3 - ignore_errors: yes - name: dns_opts (more resolv.conf options) docker_container: @@ -1133,7 +1075,6 @@ - no-check-names force_kill: yes register: dns_opts_4 - ignore_errors: yes - name: cleanup docker_container: @@ -1148,13 +1089,6 @@ - dns_opts_2 is not changed - dns_opts_3 is not changed - dns_opts_4 is changed - when: docker_py_version is version('1.10.0', '>=') -- assert: - that: - - dns_opts_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in dns_opts_1.msg" - - "'Minimum version required is 1.10.0 ' in dns_opts_1.msg" - when: docker_py_version is version('1.10.0', '<') #################################################################### ## dns_search_domains ############################################## @@ -1854,7 +1788,6 @@ retries: 2 force_kill: yes register: healthcheck_1 - ignore_errors: yes - name: healthcheck (idempotency) docker_container: @@ -1872,7 +1805,6 @@ retries: 2 force_kill: yes register: healthcheck_2 - ignore_errors: yes - name: healthcheck (changed) docker_container: @@ -1890,7 +1822,6 @@ retries: 3 force_kill: yes register: healthcheck_3 - ignore_errors: yes - name: healthcheck (no change) docker_container: @@ -1900,7 +1831,6 @@ state: started force_kill: yes register: healthcheck_4 - ignore_errors: yes - name: healthcheck (disabled) docker_container: @@ -1913,7 +1843,6 @@ - NONE force_kill: yes register: healthcheck_5 - ignore_errors: yes - name: healthcheck (disabled, idempotency) docker_container: @@ -1926,7 +1855,6 @@ - NONE force_kill: yes register: healthcheck_6 - ignore_errors: yes - name: healthcheck (disabled, idempotency, strict) docker_container: @@ -1941,7 +1869,6 @@ comparisons: '*': strict register: healthcheck_7 - ignore_errors: yes - name: healthcheck 
(string in healthcheck test, changed) docker_container: @@ -1953,7 +1880,6 @@ test: "sleep 1" force_kill: yes register: healthcheck_8 - ignore_errors: yes - name: healthcheck (string in healthcheck test, idempotency) docker_container: @@ -1965,7 +1891,6 @@ test: "sleep 1" force_kill: yes register: healthcheck_9 - ignore_errors: yes - name: cleanup docker_container: @@ -1985,13 +1910,6 @@ - healthcheck_7 is not changed - healthcheck_8 is changed - healthcheck_9 is not changed - when: docker_py_version is version('2.0.0', '>=') -- assert: - that: - - healthcheck_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in healthcheck_1.msg" - - "'Minimum version required is 2.0.0 ' in healthcheck_1.msg" - when: docker_py_version is version('2.0.0', '<') #################################################################### ## hostname ######################################################## @@ -2050,7 +1968,6 @@ init: yes state: started register: init_1 - ignore_errors: yes - name: init (idempotency) docker_container: @@ -2060,7 +1977,6 @@ init: yes state: started register: init_2 - ignore_errors: yes - name: init (change) docker_container: @@ -2071,7 +1987,6 @@ state: started force_kill: yes register: init_3 - ignore_errors: yes - name: cleanup docker_container: @@ -2085,13 +2000,6 @@ - init_1 is changed - init_2 is not changed - init_3 is changed - when: docker_py_version is version('2.2.0', '>=') -- assert: - that: - - init_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in init_1.msg" - - "'Minimum version required is 2.2.0 ' in init_1.msg" - when: docker_py_version is version('2.2.0', '<') #################################################################### ## interactive ##################################################### @@ -2462,7 +2370,6 @@ state: absent force_kill: yes diff: no - ignore_errors: yes - assert: that: @@ -3188,8 +3095,6 @@ avoid such warnings, please quote the value.' 
in (log_options_2.warnings | defau state: started pid_mode: "container:{{ pid_mode_helper.container.Id }}" register: pid_mode_1 - ignore_errors: yes - # docker-py < 2.0 does not support "arbitrary" pid_mode values - name: pid_mode (idempotency) docker_container: @@ -3199,8 +3104,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau state: started pid_mode: "container:{{ cname_h1 }}" register: pid_mode_2 - ignore_errors: yes - # docker-py < 2.0 does not support "arbitrary" pid_mode values - name: pid_mode (change) docker_container: @@ -3229,13 +3132,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau - pid_mode_1 is changed - pid_mode_2 is not changed - pid_mode_3 is changed - when: docker_py_version is version('2.0.0', '>=') -- assert: - that: - - pid_mode_1 is failed - - pid_mode_2 is failed - - pid_mode_3 is changed - when: docker_py_version is version('2.0.0', '<') #################################################################### ## pids_limit ###################################################### @@ -3249,7 +3145,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau state: started pids_limit: 10 register: pids_limit_1 - ignore_errors: yes - name: pids_limit (idempotency) docker_container: @@ -3259,7 +3154,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau state: started pids_limit: 10 register: pids_limit_2 - ignore_errors: yes - name: pids_limit (changed) docker_container: @@ -3270,7 +3164,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau pids_limit: 20 force_kill: yes register: pids_limit_3 - ignore_errors: yes - name: cleanup docker_container: @@ -3284,13 +3177,6 @@ avoid such warnings, please quote the value.' 
in (log_options_2.warnings | defau - pids_limit_1 is changed - pids_limit_2 is not changed - pids_limit_3 is changed - when: docker_py_version is version('1.10.0', '>=') -- assert: - that: - - pids_limit_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in pids_limit_1.msg" - - "'Minimum version required is 1.10.0 ' in pids_limit_1.msg" - when: docker_py_version is version('1.10.0', '<') #################################################################### ## privileged ###################################################### @@ -3648,7 +3534,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau runtime: runc state: started register: runtime_1 - ignore_errors: yes - name: runtime (idempotency) docker_container: @@ -3658,7 +3543,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau runtime: runc state: started register: runtime_2 - ignore_errors: yes - name: cleanup docker_container: @@ -3671,13 +3555,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau that: - runtime_1 is changed - runtime_2 is not changed - when: docker_py_version is version('2.4.0', '>=') -- assert: - that: - - runtime_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in runtime_1.msg" - - "'Minimum version required is 2.4.0 ' in runtime_1.msg" - when: docker_py_version is version('2.4.0', '<') #################################################################### ## security_opts ################################################### @@ -3975,7 +3852,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau net.ipv4.icmp_echo_ignore_all: 1 net.ipv4.ip_forward: 1 register: sysctls_1 - ignore_errors: yes - name: sysctls (idempotency) docker_container: @@ -3987,7 +3863,6 @@ avoid such warnings, please quote the value.' 
in (log_options_2.warnings | defau net.ipv4.ip_forward: 1 net.ipv4.icmp_echo_ignore_all: 1 register: sysctls_2 - ignore_errors: yes - name: sysctls (less sysctls) docker_container: @@ -3998,7 +3873,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau sysctls: net.ipv4.icmp_echo_ignore_all: 1 register: sysctls_3 - ignore_errors: yes - name: sysctls (more sysctls) docker_container: @@ -4011,7 +3885,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau net.ipv6.conf.default.accept_redirects: 0 force_kill: yes register: sysctls_4 - ignore_errors: yes - name: cleanup docker_container: @@ -4026,13 +3899,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau - sysctls_2 is not changed - sysctls_3 is not changed - sysctls_4 is changed - when: docker_py_version is version('1.10.0', '>=') -- assert: - that: - - sysctls_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in sysctls_1.msg" - - "'Minimum version required is 1.10.0 ' in sysctls_1.msg" - when: docker_py_version is version('1.10.0', '<') #################################################################### ## tmpfs ########################################################### @@ -4260,7 +4126,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau userns_mode: host state: started register: userns_mode_1 - ignore_errors: yes - name: userns_mode (idempotency) docker_container: @@ -4270,7 +4135,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau userns_mode: host state: started register: userns_mode_2 - ignore_errors: yes - name: userns_mode (change) docker_container: @@ -4281,7 +4145,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau state: started force_kill: yes register: userns_mode_3 - ignore_errors: yes - name: cleanup docker_container: @@ -4295,13 +4158,6 @@ avoid such warnings, please quote the value.' 
in (log_options_2.warnings | defau - userns_mode_1 is changed - userns_mode_2 is not changed - userns_mode_3 is changed - when: docker_py_version is version('1.10.0', '>=') -- assert: - that: - - userns_mode_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in userns_mode_1.msg" - - "'Minimum version required is 1.10.0 ' in userns_mode_1.msg" - when: docker_py_version is version('1.10.0', '<') #################################################################### ## uts ############################################################# @@ -4315,7 +4171,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau uts: host state: started register: uts_1 - ignore_errors: yes - name: uts (idempotency) docker_container: @@ -4325,7 +4180,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau uts: host state: started register: uts_2 - ignore_errors: yes - name: uts (change) docker_container: @@ -4336,7 +4190,6 @@ avoid such warnings, please quote the value.' in (log_options_2.warnings | defau state: started force_kill: yes register: uts_3 - ignore_errors: yes - name: cleanup docker_container: @@ -4350,13 +4203,6 @@ avoid such warnings, please quote the value.' 
in (log_options_2.warnings | defau - uts_1 is changed - uts_2 is not changed - uts_3 is changed - when: docker_py_version is version('3.5.0', '>=') -- assert: - that: - - uts_1 is failed - - "('version is ' ~ docker_py_version ~ ' ') in uts_1.msg" - - "'Minimum version required is 3.5.0 ' in uts_1.msg" - when: docker_py_version is version('3.5.0', '<') #################################################################### ## working_dir ##################################################### diff --git a/tests/integration/targets/docker_container/tasks/tests/update.yml b/tests/integration/targets/docker_container/tasks/tests/update.yml new file mode 100644 index 000000000..bfff1070e --- /dev/null +++ b/tests/integration/targets/docker_container/tasks/tests/update.yml @@ -0,0 +1,172 @@ +--- +- name: Registering container name + set_fact: + cname: "{{ cname_prefix ~ '-update' }}" +- name: Registering container name + set_fact: + cnames: "{{ cnames + [cname] }}" + +# We do not test cpuset_cpus and cpuset_mems since changing it fails if the system does +# not have 'enough' CPUs. We do not test kernel_memory since it is deprecated and fails. 
+ +- name: Create container + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + blkio_weight: 123 + cpu_period: 90000 + cpu_quota: 150000 + cpu_shares: 900 + memory: 64M + memory_reservation: 64M + memory_swap: 64M + restart_policy: on-failure + restart_retries: 5 + register: create + +- name: Update values + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + blkio_weight: 234 + cpu_period: 50000 + cpu_quota: 50000 + cpu_shares: 1100 + memory: 48M + memory_reservation: 48M + memory_swap: unlimited + restart_policy: on-failure # only on-failure can have restart_retries, so don't change it here + restart_retries: 2 + register: update + diff: yes + +- name: Update values again + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 10m"' + name: "{{ cname }}" + state: started + blkio_weight: 135 + cpu_period: 30000 + cpu_quota: 40000 + cpu_shares: 1000 + memory: 32M + memory_reservation: 30M + memory_swap: 128M + restart_policy: always + restart_retries: 0 + register: update2 + diff: yes + +- name: Recreate container + docker_container: + image: "{{ docker_test_image_alpine }}" + command: '/bin/sh -c "sleep 20m"' # this will force re-creation + name: "{{ cname }}" + state: started + blkio_weight: 234 + cpu_period: 50000 + cpu_quota: 50000 + cpu_shares: 1100 + memory: 48M + memory_reservation: 48M + memory_swap: unlimited + restart_policy: on-failure + restart_retries: 2 + force_kill: yes + register: recreate + diff: yes + +- name: cleanup + docker_container: + name: "{{ cname }}" + state: absent + force_kill: yes + diff: no + +- name: Check general things + assert: + that: + - create is changed + - update is changed + - update2 is changed + - recreate is changed + + # Make sure the container was *not* recreated when it should not be + - create.container.Id == 
update.container.Id + - create.container.Id == update2.container.Id + + # Make sure that the container was recreated when it should be + - create.container.Id != recreate.container.Id + +- name: Check diff for first update + assert: + that: + # blkio_weight sometimes cannot be set, then we end up with 0 instead of the value we had + - update.diff.before.blkio_weight == 123 or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' in (create.warnings | default([])) + - update.diff.after.blkio_weight == 234 + - update.diff.before.cpu_period == 90000 + - update.diff.after.cpu_period == 50000 + - update.diff.before.cpu_quota == 150000 + - update.diff.after.cpu_quota == 50000 + - update.diff.before.cpu_shares == 900 + - update.diff.after.cpu_shares == 1100 + - update.diff.before.memory == 67108864 + - update.diff.after.memory == 50331648 + - update.diff.before.memory_reservation == 67108864 + - update.diff.after.memory_reservation == 50331648 + - (update.diff.before.memory_swap | default(0)) == 67108864 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([])) + - (update.diff.after.memory_swap | default(0)) == -1 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([])) + - "'restart_policy' not in update.diff.before" + - update.diff.before.restart_retries == 5 + - update.diff.after.restart_retries == 2 + +- name: Check diff for second update + assert: + that: + - update2.diff.before.blkio_weight == 234 or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' 
in (create.warnings | default([])) + - update2.diff.after.blkio_weight == 135 + - update2.diff.before.cpu_period == 50000 + - update2.diff.after.cpu_period == 30000 + - update2.diff.before.cpu_quota == 50000 + - update2.diff.after.cpu_quota == 40000 + - update2.diff.before.cpu_shares == 1100 + - update2.diff.after.cpu_shares == 1000 + - update2.diff.before.memory == 50331648 + - update2.diff.after.memory == 33554432 + - update2.diff.before.memory_reservation == 50331648 + - update2.diff.after.memory_reservation == 31457280 + - (update2.diff.before.memory_swap | default(0)) == -1 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([])) + - (update2.diff.after.memory_swap | default(0)) == 134217728 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([])) + - update2.diff.before.restart_policy == 'on-failure' + - update2.diff.after.restart_policy == 'always' + - update2.diff.before.restart_retries == 2 + - update2.diff.after.restart_retries == 0 + +- name: Check diff for recreation + assert: + that: + - recreate.diff.before.blkio_weight == 135 or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' 
in (create.warnings | default([])) + - recreate.diff.after.blkio_weight == 234 + - recreate.diff.before.cpu_period == 30000 + - recreate.diff.after.cpu_period == 50000 + - recreate.diff.before.cpu_quota == 40000 + - recreate.diff.after.cpu_quota == 50000 + - recreate.diff.before.cpu_shares == 1000 + - recreate.diff.after.cpu_shares == 1100 + - recreate.diff.before.memory == 33554432 + - recreate.diff.after.memory == 50331648 + - recreate.diff.before.memory_reservation == 31457280 + - recreate.diff.after.memory_reservation == 50331648 + - (recreate.diff.before.memory_swap | default(0)) == 134217728 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([])) + - (recreate.diff.after.memory_swap | default(0)) == -1 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([])) + - recreate.diff.before.restart_policy == 'always' + - recreate.diff.after.restart_policy == 'on-failure' + - recreate.diff.before.restart_retries == 0 + - recreate.diff.after.restart_retries == 2 + - recreate.diff.before.command == ['/bin/sh', '-c', 'sleep 10m'] + - recreate.diff.after.command == ['/bin/sh', '-c', 'sleep 20m'] diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index fc12c3046..f1a910e3d 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -5,4 +5,6 @@ .azure-pipelines/scripts/publish-codecov.py future-import-boilerplate .azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate plugins/modules/current_container_facts.py validate-modules:return-syntax-error -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path +plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax +plugins/module_utils/module_container/module.py 
import-2.6!skip # Uses Python 2.7+ syntax +plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index fc12c3046..f1a910e3d 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -5,4 +5,6 @@ .azure-pipelines/scripts/publish-codecov.py future-import-boilerplate .azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate plugins/modules/current_container_facts.py validate-modules:return-syntax-error -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path +plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax +plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax +plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 9be213a02..5d5b2fd4c 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -1,3 +1,2 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen plugins/modules/current_container_facts.py validate-modules:return-syntax-error -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt index 878e37963..2dc9aec2e 100644 --- a/tests/sanity/ignore-2.13.txt +++ b/tests/sanity/ignore-2.13.txt @@ -1,2 +1 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt index 878e37963..2dc9aec2e 100644 --- a/tests/sanity/ignore-2.14.txt +++ b/tests/sanity/ignore-2.14.txt @@ -1,2 +1 @@ .azure-pipelines/scripts/publish-codecov.py replace-urlopen 
-plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 8cdc50bd0..4d39d8bb6 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -4,4 +4,6 @@ .azure-pipelines/scripts/publish-codecov.py compile-3.5!skip # Uses Python 3.6+ syntax .azure-pipelines/scripts/publish-codecov.py future-import-boilerplate .azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate -plugins/modules/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path +plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax +plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax +plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax diff --git a/tests/unit/plugins/modules/test_docker_container.py b/tests/unit/plugins/modules/test_docker_container.py deleted file mode 100644 index 00701961f..000000000 --- a/tests/unit/plugins/modules/test_docker_container.py +++ /dev/null @@ -1,22 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import unittest - -from ansible_collections.community.docker.plugins.modules.docker_container import TaskParameters - - -class TestTaskParameters(unittest.TestCase): - """Unit tests for TaskParameters.""" - - def test_parse_exposed_ports_tcp_udp(self): - """ - Ensure _parse_exposed_ports does not cancel ports with the same - number but different protocol. - """ - task_params = TaskParameters.__new__(TaskParameters) - task_params.exposed_ports = None - result = task_params._parse_exposed_ports([80, '443', '443/udp']) - self.assertTrue((80, 'tcp') in result) - self.assertTrue((443, 'tcp') in result) - self.assertTrue((443, 'udp') in result)