diff --git a/changelogs/fragments/9225-proxmox-module-refactoring.yml b/changelogs/fragments/9225-proxmox-module-refactoring.yml new file mode 100644 index 00000000000..5987dee87d9 --- /dev/null +++ b/changelogs/fragments/9225-proxmox-module-refactoring.yml @@ -0,0 +1,9 @@ +minor_changes: + - proxmox - refactors the proxmox module (https://github.com/ansible-collections/community.general/pull/9225). + - proxmox - fixes idempotency of template conversions (https://github.com/ansible-collections/community.general/pull/9225, https://github.com/ansible-collections/community.general/issues/8811). + - proxmox - fixes issues with disk_volume variable (https://github.com/ansible-collections/community.general/pull/9225, https://github.com/ansible-collections/community.general/issues/9065). + - proxmox - fixes incorrect parsing for bind-only mounts (https://github.com/ansible-collections/community.general/pull/9225, https://github.com/ansible-collections/community.general/issues/8982). + - proxmox module utils - fixes ignoring of ``choose_first_if_multiple`` argument in ``get_vmid`` (https://github.com/ansible-collections/community.general/pull/9225). + +deprecated_features: + - proxmox - removes default value of ``update`` parameter. 
This will be changed to a default of ``true`` in the future (https://github.com/ansible-collections/community.general/pull/9225) diff --git a/plugins/module_utils/proxmox.py b/plugins/module_utils/proxmox.py index b0037dacb38..4e0b4ecbddb 100644 --- a/plugins/module_utils/proxmox.py +++ b/plugins/module_utils/proxmox.py @@ -144,7 +144,7 @@ def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False): return None self.module.fail_json(msg='No VM with name %s found' % name) - elif len(vms) > 1: + elif len(vms) > 1 and not choose_first_if_multiple: self.module.fail_json(msg='Multiple VMs with name %s found, provide vmid instead' % name) return vms[0] diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py index bd33245eade..5d88b3fae80 100644 --- a/plugins/modules/proxmox.py +++ b/plugins/modules/proxmox.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: GPL-3.0-or-later from __future__ import absolute_import, division, print_function + __metaclass__ = type DOCUMENTATION = r""" @@ -45,10 +46,11 @@ - Older versions of Proxmox will accept a numeric value for size using the O(storage) parameter to automatically choose which storage to allocate from, however new versions enforce the C(:) syntax. - Additional options are available by using some combination of the following key-value pairs as a comma-delimited list - C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] + C([volume=] + [,acl=<1|0>] [,mountoptions=] [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=]). - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. - - This option is mutually exclusive with O(storage) and O(disk_volume). + - This option is mutually exclusive with O(disk_volume). type: str disk_volume: description: @@ -68,13 +70,13 @@ - O(disk_volume.volume) is the name of an existing volume. - If not defined, the module will check if one exists. If not, a new volume will be created. 
- If defined, the volume must exist under that name. - - Required only if O(disk_volume.storage) is defined and mutually exclusive with O(disk_volume.host_path). + - Required only if O(disk_volume.storage) is defined, and mutually exclusive with O(disk_volume.host_path). type: str size: description: - O(disk_volume.size) is the size of the storage to use. - - The size is given in GB. - - Required only if O(disk_volume.storage) is defined and mutually exclusive with O(disk_volume.host_path). + - The size is given in GiB. + - Required only if O(disk_volume.storage) is defined, and mutually exclusive with O(disk_volume.host_path). type: int host_path: description: @@ -157,7 +159,7 @@ size: description: - O(mount_volumes[].size) is the size of the storage to use. - - The size is given in GB. + - The size is given in GiB. - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path). type: int host_path: @@ -186,7 +188,7 @@ storage: description: - Target storage. - - This Option is mutually exclusive with O(disk) and O(disk_volume). + - This option is mutually exclusive with O(disk_volume) and O(mount_volumes). type: str default: 'local' ostype: @@ -226,8 +228,8 @@ update: description: - If V(true), the container will be updated with new values. + - The current default value of V(false) is deprecated and should be replaced with V(update=true) in version 11.0.0. 
type: bool - default: false version_added: 8.1.0 force: description: @@ -605,397 +607,27 @@ import re import time -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native - - from ansible_collections.community.general.plugins.module_utils.proxmox import ( - ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible) - -VZ_TYPE = None - - -class ProxmoxLxcAnsible(ProxmoxAnsible): - def content_check(self, node, ostemplate, template_store): - return [True for cnt in self.proxmox_api.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate] - - def is_template_container(self, node, vmid): - """Check if the specified container is a template.""" - proxmox_node = self.proxmox_api.nodes(node) - config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get() - return config.get('template', False) - - def update_config(self, vmid, node, disk, cpus, memory, swap, **kwargs): - if VZ_TYPE != "lxc": - self.module.fail_json( - changed=False, - msg="Updating configuration is only supported for LXC enabled proxmox clusters.", - ) - - def parse_disk_string(disk_string): - # Example strings: - # "acl=0,thin1:base-100-disk-1,size=8G" - # "thin1:10,backup=0" - # "local:20" - # "volume=local-lvm:base-100-disk-1,size=20G" - # "/mnt/bindmounts/shared,mp=/shared" - # "volume=/dev/USB01,mp=/mnt/usb01" - args = disk_string.split(",") - # If the volume is not explicitly defined but implicit by only passing a key, - # add the "volume=" key prefix for ease of parsing. - args = ["volume=" + arg if "=" not in arg else arg for arg in args] - # Then create a dictionary from the arguments - disk_kwargs = dict(map(lambda item: item.split("="), args)) - - VOLUME_PATTERN = r"""(?x) - (?:(?P[\w\-.]+): - (?:(?P\d+)| - (?P[^,\s]+)) - )| - (?P[^,\s]+) - """ - # DISCLAIMER: - # There are two things called a "volume": - # 1. 
The "volume" key which describes the storage volume, device or directory to mount into the container. - # 2. The storage volume of a storage-backed mount point in the PVE storage sub system. - # In this section, we parse the "volume" key and check which type of mount point we are dealing with. - pattern = re.compile(VOLUME_PATTERN) - match_dict = pattern.match(disk_kwargs.pop("volume")).groupdict() - match_dict = {k: v for k, v in match_dict.items() if v is not None} - - if "storage" in match_dict and "volume" in match_dict: - disk_kwargs["storage"] = match_dict["storage"] - disk_kwargs["volume"] = match_dict["volume"] - elif "storage" in match_dict and "size" in match_dict: - disk_kwargs["storage"] = match_dict["storage"] - disk_kwargs["size"] = match_dict["size"] - elif "host_path" in match_dict: - disk_kwargs["host_path"] = match_dict["host_path"] - - # Pattern matching only available in Python 3.10+ - # match match_dict: - # case {"storage": storage, "volume": volume}: - # disk_kwargs["storage"] = storage - # disk_kwargs["volume"] = volume - - # case {"storage": storage, "size": size}: - # disk_kwargs["storage"] = storage - # disk_kwargs["size"] = size - - # case {"host_path": host_path}: - # disk_kwargs["host_path"] = host_path - - return disk_kwargs - - def convert_mounts(mount_dict): - return_list = [] - for mount_key, mount_value in mount_dict.items(): - mount_config = parse_disk_string(mount_value) - return_list.append(dict(id=mount_key, **mount_config)) - - return return_list - - def build_volume( - key, - storage=None, - volume=None, - host_path=None, - size=None, - mountpoint=None, - options=None, - **kwargs - ): - if size is not None and isinstance(size, str): - size = size.strip("G") - # 1. 
Handle volume checks/creation - # 1.1 Check if defined volume exists - if volume is not None: - storage_content = self.get_storage_content(node, storage, vmid=vmid) - vol_ids = [vol["volid"] for vol in storage_content] - volid = "{storage}:{volume}".format(storage=storage, volume=volume) - if volid not in vol_ids: - self.module.fail_json( - changed=False, - msg="Storage {storage} does not contain volume {volume}".format( - storage=storage, - volume=volume, - ), - ) - vol_string = "{storage}:{volume},size={size}G".format( - storage=storage, volume=volume, size=size - ) - # 1.2 If volume not defined (but storage is), check if it exists - elif storage is not None: - api_node = self.proxmox_api.nodes( - node - ) # The node must exist, but not the LXC - try: - vol = api_node.lxc(vmid).get("config").get(key) - volume = parse_disk_string(vol).get("volume") - vol_string = "{storage}:{volume},size={size}G".format( - storage=storage, volume=volume, size=size - ) - - # If not, we have proxmox create one using the special syntax - except Exception: - vol_string = "{storage}:{size}".format(storage=storage, size=size) - else: - raise AssertionError('Internal error') - - # 1.3 If we have a host_path, we don't have storage, a volume, or a size - vol_string = ",".join( - [vol_string] + - ([] if host_path is None else [host_path]) + - ([] if mountpoint is None else ["mp={0}".format(mountpoint)]) + - ([] if options is None else ["{0}={1}".format(k, v) for k, v in options.items()]) + - ([] if not kwargs else ["{0}={1}".format(k, v) for k, v in kwargs.items()]) - ) - - return {key: vol_string} - - # Version limited features - minimum_version = {"tags": "6.1", "timezone": "6.3"} - proxmox_node = self.proxmox_api.nodes(node) - - pve_version = self.version() - - # Fail on unsupported features - for option, version in minimum_version.items(): - if pve_version < LooseVersion(version) and option in kwargs: - self.module.fail_json( - changed=False, - msg="Feature {option} is only supported in 
PVE {version}+, and you're using PVE {pve_version}".format( - option=option, version=version, pve_version=pve_version - ), - ) - - # Remove all empty kwarg entries - kwargs = {key: val for key, val in kwargs.items() if val is not None} - - if cpus is not None: - kwargs["cpulimit"] = cpus - if disk is not None: - kwargs["disk_volume"] = parse_disk_string(disk) - if "disk_volume" in kwargs: - disk_dict = build_volume(key="rootfs", **kwargs.pop("disk_volume")) - kwargs.update(disk_dict) - if memory is not None: - kwargs["memory"] = memory - if swap is not None: - kwargs["swap"] = swap - if "netif" in kwargs: - kwargs.update(kwargs.pop("netif")) - if "mounts" in kwargs: - kwargs["mount_volumes"] = convert_mounts(kwargs.pop("mounts")) - if "mount_volumes" in kwargs: - mounts_list = kwargs.pop("mount_volumes") - for mount_config in mounts_list: - key = mount_config.pop("id") - mount_dict = build_volume(key=key, **mount_config) - kwargs.update(mount_dict) - # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string - if "tags" in kwargs: - re_tag = re.compile(r"^[a-z0-9_][a-z0-9_\-\+\.]*$") - for tag in kwargs["tags"]: - if not re_tag.match(tag): - self.module.fail_json(msg="%s is not a valid tag" % tag) - kwargs["tags"] = ",".join(kwargs["tags"]) - - # fetch the current config - current_config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get() - - # compare the requested config against the current - update_config = False - for (arg, value) in kwargs.items(): - # if the arg isn't in the current config, it needs to be updated - if arg not in current_config: - update_config = True - break - # some values are lists, the order isn't always the same, so split them and compare by key - if isinstance(value, str): - current_values = current_config[arg].split(",") - requested_values = value.split(",") - for new_value in requested_values: - if new_value not in current_values: - update_config = True - break - # if it is not a list (or string) just 
compare the current value - else: - # some types don't match with the API, so forcing to string for comparison - if str(value) != str(current_config[arg]): - update_config = True - break - - if update_config: - getattr(proxmox_node, VZ_TYPE)(vmid).config.put(vmid=vmid, node=node, **kwargs) - else: - self.module.exit_json(changed=False, msg="Container config is already up to date") - - def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs): - - # Version limited features - minimum_version = { - 'tags': '6.1', - 'timezone': '6.3' - } - proxmox_node = self.proxmox_api.nodes(node) - - # Remove all empty kwarg entries - kwargs = {k: v for k, v in kwargs.items() if v is not None} - - pve_version = self.version() - - # Fail on unsupported features - for option, version in minimum_version.items(): - if pve_version < LooseVersion(version) and option in kwargs: - self.module.fail_json(changed=False, msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_version}". 
- format(option=option, version=version, pve_version=pve_version)) - - if VZ_TYPE == 'lxc': - kwargs['cpulimit'] = cpus - kwargs['rootfs'] = disk - if 'netif' in kwargs: - kwargs.update(kwargs['netif']) - del kwargs['netif'] - if 'mounts' in kwargs: - kwargs.update(kwargs['mounts']) - del kwargs['mounts'] - if 'pubkey' in kwargs: - if self.version() >= LooseVersion('4.2'): - kwargs['ssh-public-keys'] = kwargs['pubkey'] - del kwargs['pubkey'] - else: - kwargs['cpus'] = cpus - kwargs['disk'] = disk - - # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string - if 'tags' in kwargs: - re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$') - for tag in kwargs['tags']: - if not re_tag.match(tag): - self.module.fail_json(msg='%s is not a valid tag' % tag) - kwargs['tags'] = ",".join(kwargs['tags']) - - if kwargs.get('ostype') == 'auto': - kwargs.pop('ostype') - - if clone is not None: - if VZ_TYPE != 'lxc': - self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.") - - clone_is_template = self.is_template_container(node, clone) - - # By default, create a full copy only when the cloned container is not a template. - create_full_copy = not clone_is_template - - # Only accept parameters that are compatible with the clone endpoint. - valid_clone_parameters = ['hostname', 'pool', 'description'] - if self.module.params['storage'] is not None and clone_is_template: - # Cloning a template, so create a full copy instead of a linked copy - create_full_copy = True - elif self.module.params['storage'] is None and not clone_is_template: - # Not cloning a template, but also no defined storage. This isn't possible. 
- self.module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.") - - if self.module.params['clone_type'] == 'linked': - if not clone_is_template: - self.module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.") - # Don't need to do more, by default create_full_copy is set to false already - elif self.module.params['clone_type'] == 'opportunistic': - if not clone_is_template: - # Cloned container is not a template, so we need our 'storage' parameter - valid_clone_parameters.append('storage') - elif self.module.params['clone_type'] == 'full': - create_full_copy = True - valid_clone_parameters.append('storage') - - clone_parameters = {} - - if create_full_copy: - clone_parameters['full'] = '1' - else: - clone_parameters['full'] = '0' - for param in valid_clone_parameters: - if self.module.params[param] is not None: - clone_parameters[param] = self.module.params[param] - - taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters) - else: - taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs) - - while timeout: - if self.api_task_ok(node, taskid): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(vmid=vmid, node=node, msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def start_instance(self, vm, vmid, timeout): - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post() - while timeout: - if self.api_task_ok(vm['node'], taskid): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for starting VM. 
Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def stop_instance(self, vm, vmid, timeout, force): - if force: - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) - else: - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post() - while timeout: - if self.api_task_ok(vm['node'], taskid): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - def convert_to_template(self, vm, vmid, timeout, force): - if getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running' and force: - self.stop_instance(vm, vmid, timeout, force) - # not sure why, but templating a container doesn't return a taskid - getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).template.post() - return True - - def umount_instance(self, vm, vmid, timeout): - taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post() - while timeout: - if self.api_task_ok(vm['node'], taskid): - return True - timeout -= 1 - if timeout == 0: - self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for unmounting VM. 
Last line in task before timeout: %s' % - self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) - - time.sleep(1) - return False - - -def main(): - module_args = proxmox_auth_argument_spec() - proxmox_args = dict( - vmid=dict(type='int', required=False), + ProxmoxAnsible, + ansible_to_proxmox_bool, + proxmox_auth_argument_spec, +) +from ansible_collections.community.general.plugins.module_utils.version import ( + LooseVersion, +) + + +def get_proxmox_args(): + return dict( + vmid=dict(type="int", required=False), node=dict(), pool=dict(), password=dict(no_log=True), hostname=dict(), ostemplate=dict(), - disk=dict(type='str'), + disk=dict(type="str"), disk_volume=dict( type="dict", options=dict( @@ -1015,12 +647,12 @@ def main(): ("host_path", "size"), ], ), - cores=dict(type='int'), - cpus=dict(type='int'), - memory=dict(type='int'), - swap=dict(type='int'), - netif=dict(type='dict'), - mounts=dict(type='dict'), + cores=dict(type="int"), + cpus=dict(type="int"), + memory=dict(type="int"), + swap=dict(type="int"), + netif=dict(type="dict"), + mounts=dict(type="dict"), mount_volumes=dict( type="list", elements="dict", @@ -1044,282 +676,1059 @@ def main(): ], ), ip_address=dict(), - ostype=dict(default='auto', choices=[ - 'auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged' - ]), - onboot=dict(type='bool'), - features=dict(type='list', elements='str'), - startup=dict(type='list', elements='str'), - storage=dict(default='local'), - cpuunits=dict(type='int'), + ostype=dict( + default="auto", + choices=[ + "auto", + "debian", + "devuan", + "ubuntu", + "centos", + "fedora", + "opensuse", + "archlinux", + "alpine", + "gentoo", + "nixos", + "unmanaged", + ], + ), + onboot=dict(type="bool"), + features=dict(type="list", elements="str"), + startup=dict(type="list", elements="str"), + storage=dict(default="local"), + cpuunits=dict(type="int"), nameserver=dict(), searchdomain=dict(), - 
timeout=dict(type='int', default=30), - update=dict(type='bool', default=False), - force=dict(type='bool', default=False), - purge=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'template']), - pubkey=dict(type='str'), - unprivileged=dict(type='bool', default=True), - description=dict(type='str'), - hookscript=dict(type='str'), - timezone=dict(type='str'), - clone=dict(type='int'), - clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']), - tags=dict(type='list', elements='str') + timeout=dict(type="int", default=30), + update=dict(type="bool"), + force=dict(type="bool", default=False), + purge=dict(type="bool", default=False), + state=dict( + default="present", + choices=[ + "present", + "absent", + "stopped", + "started", + "restarted", + "template", + ], + ), + pubkey=dict(type="str"), + unprivileged=dict(type="bool", default=True), + description=dict(type="str"), + hookscript=dict(type="str"), + timezone=dict(type="str"), + clone=dict(type="int"), + clone_type=dict( + default="opportunistic", choices=["full", "linked", "opportunistic"] + ), + tags=dict(type="list", elements="str"), ) - module_args.update(proxmox_args) - module = AnsibleModule( + +def get_ansible_module(): + module_args = proxmox_auth_argument_spec() + module_args.update(get_proxmox_args()) + + return AnsibleModule( argument_spec=module_args, required_if=[ - ('state', 'present', ['node', 'hostname']), - # Require one of clone, ostemplate, or update. Together with mutually_exclusive this ensures that we - # either clone a container or create a new one from a template file. - ('state', 'present', ('clone', 'ostemplate', 'update'), True), + ("state", "present", ["node", "hostname"]), + # Require one of clone, ostemplate, or update. + # Together with mutually_exclusive this ensures that we either + # clone a container or create a new one from a template file. 
+ ("state", "present", ("clone", "ostemplate", "update"), True), ], required_together=[("api_token_id", "api_token_secret")], required_one_of=[("api_password", "api_token_id")], mutually_exclusive=[ - ( - "clone", - "ostemplate", - "update", - ), # Creating a new container is done either by cloning an existing one, or based on a template. - ("disk", "disk_volume", "storage"), + # Creating a new container is done either by cloning an existing one, or based on a template. + ("clone", "ostemplate", "update"), + ("disk", "disk_volume"), + ("storage", "disk_volume"), ("mounts", "mount_volumes"), ], ) - proxmox = ProxmoxLxcAnsible(module) - global VZ_TYPE - VZ_TYPE = 'openvz' if proxmox.version() < LooseVersion('4.0') else 'lxc' - - state = module.params['state'] - vmid = module.params['vmid'] - node = module.params['node'] - disk = module.params['disk'] - cpus = module.params['cpus'] - memory = module.params['memory'] - swap = module.params['swap'] - storage = module.params['storage'] - hostname = module.params['hostname'] - if module.params['ostemplate'] is not None: - template_store = module.params['ostemplate'].split(":")[0] - timeout = module.params['timeout'] - clone = module.params['clone'] - - # If vmid not set get the Next VM id from ProxmoxAPI - # If hostname is set get the VM id from ProxmoxAPI - if not vmid and state == 'present': - vmid = proxmox.get_nextvmid() - elif not vmid and hostname: - vmid = proxmox.get_vmid(hostname) - elif not vmid: - module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) - - # Create a new container - if state == 'present' and clone is None: - try: - if proxmox.get_vm(vmid, ignore_missing=True): - if module.params["update"]: - try: - proxmox.update_config(vmid, node, disk, cpus, memory, swap, - cores=module.params["cores"], - hostname=module.params["hostname"], - netif=module.params["netif"], - disk_volume=module.params["disk_volume"], - mounts=module.params["mounts"], - 
mount_volumes=module.params["mount_volumes"], - ip_address=module.params["ip_address"], - onboot=ansible_to_proxmox_bool(module.params["onboot"]), - cpuunits=module.params["cpuunits"], - nameserver=module.params["nameserver"], - searchdomain=module.params["searchdomain"], - features=",".join(module.params["features"]) - if module.params["features"] is not None - else None, - startup=",".join(module.params["startup"]) - if module.params["startup"] is not None - else None, - description=module.params["description"], - hookscript=module.params["hookscript"], - timezone=module.params["timezone"], - tags=module.params["tags"]) - module.exit_json( - changed=True, - vmid=vmid, - msg="Configured VM %s" % (vmid), - ) - except Exception as e: - module.fail_json( - vmid=vmid, - msg="Configuration of %s VM %s failed with exception: %s" - % (VZ_TYPE, vmid, e), - ) - if not module.params["force"]: - module.exit_json( - changed=False, - vmid=vmid, - msg="VM with vmid = %s is already exists" % vmid, - ) - # If no vmid was passed, there cannot be another VM named 'hostname' - if (not module.params['vmid'] and - proxmox.get_vmid(hostname, ignore_missing=True) and - not module.params['force']): - vmid = proxmox.get_vmid(hostname) - module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid)) - elif not proxmox.get_node(node): - module.fail_json(vmid=vmid, msg="node '%s' not exists in cluster" % node) - elif not proxmox.content_check(node, module.params['ostemplate'], template_store): - module.fail_json(vmid=vmid, msg="ostemplate '%s' not exists on node %s and storage %s" - % (module.params['ostemplate'], node, template_store)) - except Exception as e: - module.fail_json(vmid=vmid, msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e)) +class ProxmoxLxcAnsible(ProxmoxAnsible): + MINIMUM_VERSIONS = { + "disk_volume": "5.0", + "mount_volumes": "5.0", + "tags": 
"6.1", + "timezone": "6.3", + } - try: - proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone, - cores=module.params['cores'], - pool=module.params['pool'], - password=module.params['password'], - hostname=module.params['hostname'], - ostemplate=module.params['ostemplate'], - netif=module.params['netif'], - disk_volume=module.params["disk_volume"], - mounts=module.params['mounts'], - mount_volumes=module.params["mount_volumes"], - ostype=module.params['ostype'], - ip_address=module.params['ip_address'], - onboot=ansible_to_proxmox_bool(module.params['onboot']), - cpuunits=module.params['cpuunits'], - nameserver=module.params['nameserver'], - searchdomain=module.params['searchdomain'], - force=ansible_to_proxmox_bool(module.params['force']), - pubkey=module.params['pubkey'], - features=",".join(module.params['features']) if module.params['features'] is not None else None, - startup=",".join(module.params['startup']) if module.params['startup'] is not None else None, - unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']), - description=module.params['description'], - hookscript=module.params['hookscript'], - timezone=module.params['timezone'], - tags=module.params['tags']) - - module.exit_json(changed=True, vmid=vmid, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate'])) - except Exception as e: - module.fail_json(vmid=vmid, msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e)) + def __init__(self, module): + super(ProxmoxLxcAnsible, self).__init__(module) - # Clone a container - elif state == 'present' and clone is not None: - try: - if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']: - module.exit_json(changed=False, vmid=vmid, msg="VM with vmid = %s is already exists" % vmid) - # If no vmid was passed, there cannot be another VM named 'hostname' - if (not module.params['vmid'] and - proxmox.get_vmid(hostname, ignore_missing=True) and - not 
module.params['force']): - vmid = proxmox.get_vmid(hostname) - module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid)) - if not proxmox.get_vm(clone, ignore_missing=True): - module.exit_json(changed=False, vmid=vmid, msg="Container to be cloned does not exist") - except Exception as e: - module.fail_json(vmid=vmid, msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e)) + self.VZ_TYPE = "openvz" if self.version() < LooseVersion("4.0") else "lxc" + self.params = self.module.params - try: - proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone) + def run(self): + state = self.params.get("state") - module.exit_json(changed=True, vmid=vmid, msg="Cloned VM %s from %s" % (vmid, clone)) - except Exception as e: - module.fail_json(vmid=vmid, msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e)) + vmid = self.params.get("vmid") + hostname = self.params.get("hostname") + + if not vmid and not hostname: + self.module.fail_json(msg="Either VMID or hostname must be provided.") + + if state == "present": + if hostname is None: + self.module.fail_json( + msg="Hostname must be provided when state is 'present'." 
+ ) + else: + self.lxc_present( + vmid, + hostname, + node=self.params.get("node"), + update=self.params.get("update"), + force=self.params.get("force"), + ) + elif state == "absent": + self.lxc_absent( + vmid, + hostname, + node=self.params.get("node"), + timeout=self.params.get("timeout"), + purge=self.params.get("purge"), + ) + elif state == "started": + self.lxc_started( + vmid, + hostname, + node=self.params.get("node"), + timeout=self.params.get("timeout"), + ) + elif state == "stopped": + self.lxc_stopped( + vmid, + hostname, + node=self.params.get("node"), + timeout=self.params.get("timeout"), + force=self.params.get("force"), + ) + elif state == "restarted": + self.lxc_restarted( + vmid, + hostname, + node=self.params.get("node"), + timeout=self.params.get("timeout"), + force=self.params.get("force"), + ) + elif state == "template": + self.lxc_to_template( + vmid, + hostname, + node=self.params.get("node"), + timeout=self.params.get("timeout"), + force=self.params.get("force"), + ) + + def lxc_present(self, vmid, hostname, node, update, force): + self.check_supported_features() - elif state == 'started': try: - vm = proxmox.get_vm(vmid) - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid) + lxc = self.get_lxc_resource(vmid, hostname) + vmid = vmid or lxc["id"].split("/")[-1] + node = node or lxc["node"] + except LookupError: + lxc = None + vmid = vmid or self.get_nextvmid() + + if node is None: + raise ValueError( + "Argument 'node' is None, but should be found from VMID/hostname or provided." 
+ ) - if proxmox.start_instance(vm, vmid, timeout): - module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid) - except Exception as e: - module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e)) + # check if the container exists already + if lxc is not None: + if update is None: + # TODO: Remove deprecation warning in version 11.0.0 + self.module.deprecate( + msg="The default value of False for 'update' has been deprecated and will be changed to True in version 11.0.0.", + version="11.0.0", + collection_name="community.general", + ) + update = False + + if update: + # Update it if we should + identifier = self.format_vm_identifier(vmid, hostname) + features = self.params.get("features") + if features is not None: + features = ",".join(features) + startup = self.params.get("startup") + if startup is not None: + startup = ",".join(startup) + self.update_lxc_instance( + vmid, + node, + cores=self.params.get("cores"), + cpus=self.params.get("cpus"), + cpuunits=self.params.get("cpuunits"), + description=self.params.get("description"), + disk=self.params.get("disk"), + disk_volume=self.params.get("disk_volume"), + features=features, + hookscript=self.params.get("hookscript"), + hostname=self.params.get("hostname"), + ip_address=self.params.get("ip_address"), + memory=self.params.get("memory"), + mounts=self.params.get("mounts"), + mount_volumes=self.params.get("mount_volumes"), + nameserver=self.params.get("nameserver"), + netif=self.params.get("netif"), + onboot=ansible_to_proxmox_bool(self.params.get("onboot")), + searchdomain=self.params.get("searchdomain"), + startup=startup, + swap=self.params.get("swap"), + tags=self.params.get("tags"), + timezone=self.params.get("timezone"), + ) + self.module.exit_json( + changed=True, vmid=vmid, msg="VM %s has been updated." 
% identifier + ) + elif not force: + # We're done if it shouldn't be forcefully created + identifier = self.format_vm_identifier(vmid, lxc["name"]) + self.module.exit_json( + changed=False, vmid=vmid, msg="VM %s already exists." % identifier + ) + self.module.debug( + "VM %s already exists, but we don't update and instead forcefully recreate it." + % identifier + ) + + self.new_lxc_instance( + vmid, + hostname, + node=self.params.get("node"), + clone_from=self.params.get("clone"), + ostemplate=self.params.get("ostemplate"), + force=force, + ) - elif state == 'stopped': + def lxc_absent(self, vmid, hostname, node, timeout, purge): try: - vm = proxmox.get_vm(vmid) + lxc = self.get_lxc_resource(vmid, hostname) + except LookupError: + identifier = self.format_vm_identifier(vmid, hostname) + self.module.exit_json( + changed=False, vmid=vmid, msg="VM %s is already absent." % (identifier) + ) + + vmid = vmid or lxc["id"].split("/")[-1] + node = node or lxc["node"] + + lxc_status = self.get_lxc_status(vmid, node) + identifier = self.format_vm_identifier(vmid, hostname) + + if lxc_status == "running": + self.module.exit_json( + changed=False, + vmid=vmid, + msg="VM %s is running. Stop it before deletion." % identifier, + ) + if lxc_status == "mounted": + self.module.exit_json( + changed=False, + vmid=vmid, + msg="VM %s is mounted. Stop it with force option before deletion." + % identifier, + ) + + self.remove_lxc_instance(vmid, node, timeout, purge) + self.module.exit_json( + changed=True, vmid=vmid, msg="VM %s removed." % identifier + ) + + def lxc_started(self, vmid, hostname, node, timeout): + lxc = self.get_lxc_resource(vmid, hostname) + vmid = vmid or lxc["id"].split("/")[-1] + hostname = hostname or lxc["name"] + identifier = self.format_vm_identifier(vmid, hostname) + node = node or lxc["node"] + lxc_status = self.get_lxc_status(vmid, lxc["node"]) + + if lxc_status == "running": + self.module.exit_json( + changed=False, vmid=vmid, msg="VM %s is already running." 
% identifier + ) + + self.start_lxc_instance(vmid, node, timeout) + self.module.exit_json( + changed=True, vmid=vmid, msg="VM %s started." % identifier + ) + + def lxc_stopped(self, vmid, hostname, node, timeout, force): + lxc = self.get_lxc_resource(vmid, hostname) + vmid = vmid or lxc["id"].split("/")[-1] + hostname = hostname or lxc["name"] + identifier = self.format_vm_identifier(vmid, hostname) + node = node or lxc["node"] + lxc_status = self.get_lxc_status(vmid, node) + + if lxc_status == "mounted": + if force: + self.umount_lxc_instance(vmid, hostname, timeout) + else: + self.module.exit_json( + changed=False, + vmid=vmid, + msg="VM %s is already stopped, but mounted. Use force option to umount it." + % identifier, + ) + + if lxc_status == "stopped": + self.module.exit_json( + changed=False, vmid=vmid, msg="VM %s is already stopped." % identifier + ) + + self.stop_lxc_instance(vmid, node, timeout, force) + self.module.exit_json( + changed=True, vmid=vmid, msg="VM %s stopped." % identifier + ) + + def lxc_restarted(self, vmid, hostname, node, timeout, force): + lxc = self.get_lxc_resource(vmid, hostname) + + vmid = vmid or lxc["id"].split("/")[-1] + hostname = hostname or lxc["name"] + node = node or lxc["node"] + + identifier = self.format_vm_identifier(vmid, hostname) + lxc_status = self.get_lxc_status(vmid, node) + + if lxc_status in ["stopped", "mounted"]: + self.module.exit_json( + changed=False, vmid=vmid, msg="VM %s is not running." % identifier + ) + + self.stop_lxc_instance(vmid, node, timeout, force) + self.start_lxc_instance(vmid, node, timeout) + self.module.exit_json( + changed=True, vmid=vmid, msg="VM %s is restarted." 
% identifier + ) + + def lxc_to_template(self, vmid, hostname, node, timeout, force): + lxc = self.get_lxc_resource(vmid, hostname) + vmid = vmid or lxc["id"].split("/")[-1] + hostname = hostname or lxc["name"] + node = node or lxc["node"] + identifier = self.format_vm_identifier(vmid, hostname) + + if self.is_template_container(node, vmid): + self.module.exit_json( + changed=False, + vmid=vmid, + msg="VM %s is already a template." % identifier, + ) + + lxc_status = self.get_lxc_status(vmid, node) + if lxc_status == "running" and force: + self.stop_instance(vmid, hostname, node, timeout, force) + + proxmox_node = self.proxmox_api.nodes(node) + getattr(proxmox_node, self.VZ_TYPE)(vmid).template.post() + self.module.exit_json( + changed=True, vmid=vmid, msg="VM %s converted to template." % identifier + ) + + def update_lxc_instance(self, vmid, node, **kwargs): + if self.VZ_TYPE != "lxc": + self.module.fail_json( + msg="Updating LXC containers is only supported for LXC-enabled clusters in PVE 4.0 and above." 
+ ) + + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + self.validate_tags(kwargs.get("tags", [])) + + disk_updates = self.process_disk_keys( + vmid, + node, + kwargs.pop("disk", None), + kwargs.pop("disk_volume", None), + ) + mounts_updates = self.process_mount_keys( + vmid, + node, + kwargs.pop("mounts", None), + kwargs.pop("mount_volumes", None), + ) + kwargs.update(disk_updates) + kwargs.update(mounts_updates) + + if "cpus" in kwargs: + kwargs["cpulimit"] = kwargs.pop("cpus") + if "netif" in kwargs: + kwargs.update(kwargs.pop("netif")) + + # fetch current config + proxmox_node = self.proxmox_api.nodes(node) + current_config = getattr(proxmox_node, self.VZ_TYPE)(vmid).config.get() + + # create diff between the current and requested config + diff = {} + for arg, value in kwargs.items(): + # if the arg isn't in the current config, it needs to be added + if arg not in current_config: + diff[arg] = value + elif isinstance(value, str): + # compare all string values as lists as some of them may be lists separated by commas. order doesn't matter + current_values = current_config[arg].split(",") + requested_values = value.split(",") + for new_value in requested_values: + if new_value not in current_values: + diff[arg] = value + break + # if it's not a list (or string) just compare the values + # some types don't match with the API, so force a string comparison + elif str(value) != str(current_config[arg]): + diff[arg] = value + + if not diff: + self.module.exit_json( + changed=False, vmid=vmid, msg="Container config is already up to date." 
+ ) + + # update the config + getattr(proxmox_node, self.VZ_TYPE)(vmid).config.put( + vmid=vmid, node=node, **kwargs + ) + + def new_lxc_instance(self, vmid, hostname, node, clone_from, ostemplate, force): + identifier = self.format_vm_identifier(vmid, hostname) + + if clone_from is not None: + self.clone_lxc_instance( + vmid, + node, + clone_from, + clone_type=self.params.get("clone_type"), + timeout=self.params.get("timeout"), + description=self.params.get("description"), + hostname=hostname, + pool=self.params.get("pool"), + storage=self.params.get("storage"), + ) + self.module.exit_json( + changed=True, + vmid=vmid, + msg="Cloned VM %s from %d" % (identifier, clone_from), + ) + + if ostemplate is not None: + features = self.params.get("features") + if features is not None: + features = ",".join(features) + startup = self.params.get("startup") + if startup is not None: + startup = ",".join(startup) + + self.create_lxc_instance( + vmid, + node, + ostemplate, + timeout=self.params.get("timeout"), + cores=self.params.get("cores"), + cpus=self.params.get("cpus"), + cpuunits=self.params.get("cpuunits"), + description=self.params.get("description"), + disk=self.params.get("disk"), + disk_volume=self.params.get("disk_volume"), + features=features, + force=ansible_to_proxmox_bool(force), + hookscript=self.params.get("hookscript"), + hostname=hostname, + ip_address=self.params.get("ip_address"), + memory=self.params.get("memory"), + mounts=self.params.get("mounts"), + mount_volumes=self.params.get("mount_volumes"), + nameserver=self.params.get("nameserver"), + netif=self.params.get("netif"), + onboot=ansible_to_proxmox_bool(self.params.get("onboot")), + ostype=self.params.get("ostype"), + password=self.params.get("password"), + pool=self.params.get("pool"), + pubkey=self.params.get("pubkey"), + searchdomain=self.params.get("searchdomain"), + startup=startup, + storage=self.params.get("storage"), + swap=self.params.get("swap"), + tags=self.params.get("tags"), + 
timezone=self.params.get("timezone"), + unprivileged=ansible_to_proxmox_bool(self.params.get("unprivileged")), + ) + self.module.exit_json( + changed=True, + vmid=vmid, + msg="Created VM %s from template %s" % (identifier, ostemplate), + ) + + self.module.fail_json( + vmid=vmid, + msg="VM %s does not exist but neither clone nor ostemplate were specified!" + % identifier, + ) + + def create_lxc_instance(self, vmid, node, ostemplate, timeout, **kwargs): + template_store = ostemplate.split(":")[0] + if not self.content_check(node, ostemplate, template_store): + self.module.fail_json( + vmid=vmid, + msg="ostemplate %s does not exist on node %s and storage %s." + % (ostemplate, node, template_store), + ) + + disk_updates = self.process_disk_keys( + vmid, + node, + kwargs.pop("disk"), + kwargs.pop("disk_volume"), + ) + mounts_updates = self.process_mount_keys( + vmid, + node, + kwargs.pop("mounts"), + kwargs.pop("mount_volumes"), + ) + kwargs.update(disk_updates) + kwargs.update(mounts_updates) + + # Remove empty values from kwargs + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + self.validate_tags(kwargs.get("tags", [])) + + if self.VZ_TYPE == "lxc": + if "cpus" in kwargs: + kwargs["cpuunits"] = kwargs.pop("cpus") + kwargs.update(kwargs.pop("netif", {})) + else: + if "mount_volumes" in kwargs: + kwargs.pop("mount_volumes") + self.module.warn( + "'mount_volumes' is not supported for non-LXC clusters. Ignoring keyword." + ) + + if "pubkey" in kwargs: + pubkey = kwargs.pop("pubkey") + if self.version() >= LooseVersion("4.2"): + kwargs["ssh-public-key"] = pubkey + else: + self.module.warn( + "'pubkey' is not supported for PVE 4.1 and below. Ignoring keyword." 
+ ) + + if kwargs.get("ostype") == "auto": + kwargs.pop("ostype") + + proxmox_node = self.proxmox_api.nodes(node) + taskid = getattr(proxmox_node, self.VZ_TYPE).create( + vmid=vmid, ostemplate=ostemplate, **kwargs + ) + self.handle_api_timeout( + vmid, + node, + taskid, + timeout, + "Reached timeout while waiting for creation of VM %s from template %s" + % (vmid, ostemplate), + ) + + def clone_lxc_instance(self, vmid, node, clone_from, clone_type, timeout, **kwargs): + if self.VZ_TYPE != "lxc": + self.module.fail_json( + msg="Cloning is only supported for LXC-enabled clusters in PVE 4.0 and above." + ) + + # Remove empty values from kwargs + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + target_is_template = self.is_template_container(node, clone_from) + # By default, create a full copy only when the cloned container is not a template. + create_full_copy = not target_is_template + + # Only accept parameters that are compatible with the clone endpoint. + valid_clone_parameters = ["hostname", "pool", "description"] + + if "storage" not in kwargs and target_is_template: + # Cloning a template, so create a full copy instead of a linked copy + create_full_copy = True + elif "storage" not in kwargs and not target_is_template: + self.module.fail_json( + changed=False, + msg="Clone target container is not a template, storage needs to be specified.", + ) - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': - if module.params['force']: - if proxmox.umount_instance(vm, vmid, timeout): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid) + if clone_type == "linked" and not target_is_template: + self.module.fail_json( + changed=False, + msg="Cloning type 'linked' is only supported for template containers.", + ) + elif clone_type == "opportunistic" and not target_is_template: + # Cloned container is not a template, so we need our 'storage' parameter + 
valid_clone_parameters.append("storage") + elif clone_type == "full": + create_full_copy = True + valid_clone_parameters.append("storage") + + clone_parameters = {} + clone_parameters["full"] = ansible_to_proxmox_bool(create_full_copy) + + for param in valid_clone_parameters: + if param in kwargs: + clone_parameters[param] = kwargs[param] + + proxmox_node = self.proxmox_api.nodes(node) + taskid = getattr(proxmox_node, self.VZ_TYPE)(clone_from).clone.post( + newid=vmid, **clone_parameters + ) + self.handle_api_timeout( + vmid, + node, + taskid, + timeout, + timeout_msg="Reached timeout while waiting for VM to clone.", + ) + + def start_lxc_instance(self, vmid, node, timeout): + proxmox_node = self.proxmox_api.nodes(node) + taskid = getattr(proxmox_node, self.VZ_TYPE)(vmid).status.start.post() + + self.handle_api_timeout( + vmid, + node, + taskid, + timeout, + timeout_msg="Reached timeout while waiting for VM to start.", + ) + + def stop_lxc_instance(self, vmid, node, timeout, force): + stop_params = {} + if force: + stop_params["forceStop"] = 1 + + proxmox_node = self.proxmox_api.nodes(node) + taskid = getattr(proxmox_node, self.VZ_TYPE)(vmid).status.shutdown.post( + **stop_params + ) + + self.handle_api_timeout( + vmid, + node, + taskid, + timeout, + timeout_msg="Reached timeout while waiting for VM to stop.", + ) + + def umount_lxc_instance(self, vmid, node, timeout): + proxmox_node = self.proxmox_api.nodes(node) + taskid = getattr(proxmox_node, self.VZ_TYPE)(vmid).status.unmount.post() + + self.handle_api_timeout( + vmid, + node, + taskid, + timeout, + timeout_msg="Reached timeout while waiting for VM to be unmounted.", + ) + + def remove_lxc_instance(self, vmid, node, timeout, purge): + delete_params = {} + if purge: + delete_params["purge"] = 1 + + proxmox_node = self.proxmox_api.nodes(node) + taskid = getattr(proxmox_node, self.VZ_TYPE).delete(vmid, **delete_params) + + self.handle_api_timeout( + vmid, + node, + taskid, + timeout, + timeout_msg="Reached 
timeout while waiting for VM to be removed.", + ) + + def process_disk_keys(self, vmid, node, disk, disk_volume): + """ + Process disk keys and return a formatted disk volume with the `rootfs` key. + + Args: + vmid (int): VM identifier. + node (str): Node identifier. + disk (str, optional): Disk key in the format 'storage:volume'. Defaults to None. + disk_volume (Dict[str, Any], optional): Disk volume data. Defaults to None. + + Returns: + Dict[str, str]: Formatted disk volume with the `rootfs` or `disk` key (depending on the `VZ_TYPE`), or an empty dict if no disk volume is specified. + """ + if disk is None and disk_volume is None: + return {} + + disk_dict = {} + + if disk is not None: + if disk.isdigit(): + disk_dict["rootfs"] = disk + else: + disk_volume = self.parse_disk_string(disk) + + if disk_volume is not None: + disk_dict = self.build_volume(vmid, node, key="rootfs", **disk_volume) + + if self.VZ_TYPE != "lxc": + disk_dict["disk"] = disk_dict.pop("rootfs") + + return disk_dict + + def process_mount_keys(self, vmid, node, mounts, mount_volumes): + """ + Process mount keys and return a formatted mount volumes with the `mp[n]` keys. + + Args: + vmid (str): VM identifier. + node (str): Node identifier. + mounts (str, optional): Mount key in the format 'pool:volume'. Defaults to None. + mount_volumes (Dict[str, Any], optional): Mount volume data. Defaults to None. + + Returns: + Dict[str, str]: Formatted mount volumes with the `mp[n]` keys, or an empty dict if no mount volumes are specified. 
+ """ + if mounts is not None: + mount_volumes = [] + for mount_key, mount_string in mounts.items(): + mount_config = self.parse_disk_string(mount_string) + mount_volumes.append(dict(id=mount_key, **mount_config)) + elif mount_volumes is None or mount_volumes == []: + return {} + + mounts_dict = {} + for mount_config in mount_volumes: + mount_key = mount_config.pop("id") + mount_dict = self.build_volume(vmid, node, key=mount_key, **mount_config) + mounts_dict.update(mount_dict) + + return mounts_dict + + def parse_disk_string(self, disk_string): + """ + Parse a disk string and return a dictionary with the disk details. + + Args: + disk_string (str): Disk string. + + Returns: + Dict[str, Any]: Disk details. + + Note: Below are some example disk strings that this function MUST be able to parse: + "acl=0,thin1:base-100-disk-1,size=8G" + "thin1:10,backup=0" + "local:20" + "local-lvm:0.50" + "tmp-dir:300/subvol-300-disk-0.subvol,acl=1,size=0T" + "tmplog-dir:300/vm-300-disk-0.raw,mp=/var/log,mountoptions=noatime,size=32M" + "volume=local-lvm:base-100-disk-1,size=20G" + "/mnt/bindmounts/shared,mp=/shared" + "volume=/dev/USB01,mp=/mnt/usb01" + """ + args = disk_string.split(",") + # If the volume is not explicitly defined but implicit by only passing a key, + # add the "volume=" key prefix for ease of parsing. + args = ["volume=" + arg if "=" not in arg else arg for arg in args] + # Then create a dictionary from the arguments + disk_kwargs = dict(map(lambda item: item.split("="), args)) + + VOLUME_PATTERN = r"""(?x) + ^ + (?: + (?: + (?P[\w\-.]+): + (?: + (?P\d+\.?\d*)| + (?P[^,\s]+) + ) + )| + (?P[^,\s]+) + ) + $ + """ + # DISCLAIMER: + # There are two things called a "volume": + # 1. The "volume" key which describes the storage volume, device or directory to mount into the container. + # 2. The storage volume of a storage-backed mount point in the PVE storage sub system. 
+ # In this section, we parse the "volume" key and check which type of mount point we are dealing with. + pattern = re.compile(VOLUME_PATTERN) + volume_string = disk_kwargs.pop("volume") + match = pattern.match(volume_string) + if match is None: + raise ValueError(("Invalid volume string: %s", volume_string)) + match_dict = match.groupdict() + match_dict = {k: v for k, v in match_dict.items() if v is not None} + + if "storage" in match_dict and "volume" in match_dict: + disk_kwargs["storage"] = match_dict["storage"] + disk_kwargs["volume"] = match_dict["volume"] + elif "storage" in match_dict and "size" in match_dict: + disk_kwargs["storage"] = match_dict["storage"] + disk_kwargs["size"] = match_dict["size"] + elif "host_path" in match_dict: + disk_kwargs["host_path"] = match_dict["host_path"] + + # Pattern matching only available in Python 3.10+ + # TODO: Uncomment the following code once only Python 3.10+ is supported + # match match_dict: + # case {"storage": storage, "volume": volume}: + # disk_kwargs["storage"] = storage + # disk_kwargs["volume"] = volume + + # case {"storage": storage, "size": size}: + # disk_kwargs["storage"] = storage + # disk_kwargs["size"] = size + + # case {"host_path": host_path}: + # disk_kwargs["host_path"] = host_path + + return disk_kwargs + + def build_volume(self, vmid, node, key, storage=None, volume=None, host_path=None, size=None, mountpoint=None, options=None, **kwargs): + """ + Build a volume string for the specified VM. + + Args: + vmid (str): The VM ID. + node (str): The node where the VM resides. + key (str): The key for the volume in the VM's config. + storage (str, optional): The storage pool where the volume resides. Defaults to None. + volume (str, optional): The name of the volume. Defaults to None. + host_path (str, optional): The host path to mount. Defaults to None. + size (str | int, optional): The size of the volume in GiB. Defaults to None. + mountpoint (str, optional): The mountpoint for the volume. 
Defaults to None. + options (Dict[str, Any], optional): Additional options for the volume. Defaults to None. + **kwargs: Additional keyword arguments. + + Returns: + Dict[str, str]: The built volume string in the format {'volume_key': 'volume_string'}. + + Note: Further documentation can be found in the proxmox-api documentation: https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points + Note: To build a valid volume string, we need ONE of the following: + A volume name, storage name, and size + Only a storage name and size (to create a new volume or assign the volume automatically) + A host directory to mount into the container + """ + if isinstance(size, int): + size = str(size) + if size is not None and isfloat(size): + size += "G" # default to GiB + # Handle volume checks/creation + # TODO: Change the code below to pattern matching once only Python 3.10+ is supported + # 1. Check if defined volume exists + if volume is not None: + storage_content = self.get_storage_content(node, storage, vmid=vmid) + vol_ids = [vol["volid"] for vol in storage_content] + volid = "{storage}:{volume}".format(storage=storage, volume=volume) + if volid not in vol_ids: + self.module.fail_json( + changed=False, + msg="Storage {storage} does not contain volume {volume}".format( + storage=storage, + volume=volume, + ), + ) + vol_string = "{storage}:{volume},size={size}".format( + storage=storage, volume=volume, size=size + ) + # 2. 
If volume not defined (but storage is), check if it exists + elif storage is not None: + proxmox_node = self.proxmox_api.nodes( + node + ) # The node must exist, but not the LXC + try: + vol = proxmox_node.lxc(vmid).get("config").get(key) + volume = self.parse_disk_string(vol).get("volume") + vol_string = "{storage}:{volume},size={size}".format( + storage=storage, volume=volume, size=size + ) + + # If not, we have proxmox create one using the special syntax + except Exception: + if size is None: + raise ValueError( + "Size must be provided for storage-backed volume creation." + ) + elif size.endswith("G"): + size = size.rstrip("G") + vol_string = "{storage}:{size}".format(storage=storage, size=size) else: - module.exit_json(changed=False, vmid=vmid, - msg=("VM %s is already shutdown, but mounted. You can use force option to umount it.") % vmid) + raise ValueError( + "Size must be provided in GiB for storage-backed volume creation. Convert it to GiB or allocate a new storage manually." + ) + # 3. If we have a host_path, we don't have storage, a volume, or a size + # Then we don't have to do anything, just build and return the vol_string + elif host_path is not None: + vol_string = "" + else: + raise ValueError( + "Could not build a valid volume string. One of volume, storage, or host_path must be provided." 
+ ) - if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is already shutdown" % vmid) + if host_path is not None: + vol_string += "," + host_path - if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid) - except Exception as e: - module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e)) + if mountpoint is not None: + vol_string += ",mp={}".format(mountpoint) - elif state == 'template': - try: - vm = proxmox.get_vm(vmid) + if options is not None: + vol_string += "," + ",".join( + ["{0}={1}".format(k, v) for k, v in options.items()] + ) - proxmox.convert_to_template(vm, vmid, timeout, force=module.params['force']) - module.exit_json(changed=True, msg="VM %s is converted to template" % vmid) - except Exception as e: - module.fail_json(vmid=vmid, msg="conversion of VM %s to template failed with exception: %s" % (vmid, e)) + if kwargs: + vol_string += "," + ",".join( + ["{0}={1}".format(k, v) for k, v in kwargs.items()] + ) + return {key: vol_string} - elif state == 'restarted': - try: - vm = proxmox.get_vm(vmid) + def get_lxc_resource(self, vmid, hostname): + if not vmid and not hostname: + self.module.fail_json(msg="Either VMID or hostname must be provided.") + + if vmid: + vm = self.get_lxc_resource_by_id(vmid) + elif hostname: + vm = self.get_lxc_resource_by_hostname(hostname) + + vmid = vmid or vm["vmid"] + if vm["type"] != self.VZ_TYPE: + identifier = self.format_vm_identifier(vmid, hostname) + self.module.fail_json( + msg="The specified VM %s is not an %s." 
% (identifier, self.VZ_TYPE) + ) + + return vm - vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] - if vm_status in ['stopped', 'mounted']: - module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid) + def get_lxc_resource_by_id(self, vmid): + vms = self.get_vm_resources() - if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and - proxmox.start_instance(vm, vmid, timeout)): - module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid) + vms = [vm for vm in vms if vm["vmid"] == vmid] + if len(vms) == 0: + raise LookupError("VM with VMID %d does not exist in cluster." % vmid) + + return vms[0] + + def get_lxc_resource_by_hostname(self, hostname): + vms = self.get_vm_resources() + + vms = [vm for vm in vms if vm["name"] == hostname] + if len(vms) == 0: + raise LookupError( + "VM with hostname %s does not exist in cluster." % hostname + ) + elif len(vms) > 1: + raise ValueError( + "Multiple VMs found with hostname %s. Please specify VMID." 
% hostname + ) + + return vms[0] + + def get_vm_resources(self): + try: + return self.proxmox_api.cluster.resources.get(type="vm") except Exception as e: - module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e)) + self.module.fail_json( + msg="Unable to retrieve list of %s VMs from cluster resources: %s" + % (self.VZ_TYPE, e) + ) - elif state == 'absent': - if not vmid: - module.exit_json(changed=False, vmid=vmid, msg='VM with hostname = %s is already absent' % hostname) + def get_lxc_status(self, vmid, node_name): try: - vm = proxmox.get_vm(vmid, ignore_missing=True) - if not vm: - module.exit_json(changed=False, vmid=vmid, msg="VM %s does not exist" % vmid) + proxmox_node = self.proxmox_api.nodes(node_name) + except Exception as e: + self.module.fail_json(msg="Unable to retrieve node information: %s" % e) + return getattr(proxmox_node, self.VZ_TYPE)(vmid).status.current.get() + + def format_vm_identifier(self, vmid, hostname): + if vmid and hostname: + return "%s (%s)" % (hostname, vmid) + elif hostname: + return hostname + else: + return to_native(vmid) - vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] - if vm_status == 'running': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion." % vmid) + def handle_api_timeout(self, vmid, node, taskid, timeout, timeout_msg=""): + if timeout_msg != "": + timeout_msg = "%s " % timeout_msg - if vm_status == 'mounted': - module.exit_json(changed=False, vmid=vmid, msg="VM %s is mounted. Stop it with force option before deletion." 
% vmid) + while timeout > 0: + if self.api_task_ok(node, taskid): + return + timeout -= 1 + time.sleep(1) - delete_params = {} + self.module.fail_json( + vmid=vmid, + taskid=taskid, + msg="%sLast line in task before timeout: %s" + % (timeout_msg, self.proxmox_api.nodes(node).tasks(taskid).log.get()[:1]), + ) - if module.params['purge']: - delete_params['purge'] = 1 + def is_template_container(self, node, target): + """Check if the specified container is a template.""" + proxmox_node = self.proxmox_api.nodes(node) + config = getattr(proxmox_node, self.VZ_TYPE)(target).config.get() + return config.get("template", False) - taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params) + def content_check(self, node, ostemplate, template_store): + """Check if the specified ostemplate is present in the specified storage.""" + proxmox_node = self.proxmox_api.nodes(node) + storage_contents = proxmox_node.storage(template_store).content.get() + return any(content["volid"] == ostemplate for content in storage_contents) + + def validate_tags(self, tags): + """Check if the specified tags are valid.""" + re_tag = re.compile(r"^[a-z0-9_][a-z0-9_\-\+\.]*$") + for tag in tags: + if not re_tag.match(tag): + self.module.fail_json(msg="%s is not a valid tag" % tag) + return False + return True - while timeout: - if proxmox.api_task_ok(vm['node'], taskid): - module.exit_json(changed=True, vmid=vmid, taskid=taskid, msg="VM %s removed" % vmid) - timeout -= 1 - if timeout == 0: - module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for removing VM. 
Last line in task before timeout: %s' - % proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1]) + def check_supported_features(self): + for option, version in self.MINIMUM_VERSIONS.items(): + if self.version() < LooseVersion(version) and option in self.module.params: + self.module.fail_json( + changed=False, + msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_version}".format( + option=option, version=version, pve_version=self.version() + ), + ) - time.sleep(1) - except Exception as e: - module.fail_json(vmid=vmid, msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e))) + +def isfloat(value): + if value is None: + return False + try: + float(value) + return True + except ValueError: + return False + + +def main(): + module = get_ansible_module() + proxmox = ProxmoxLxcAnsible(module) + + try: + proxmox.run() + except Exception as e: + module.fail_json(msg="An error occurred: %s" % to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main()