From 1008120bb4e0b13745e1db45c3f5173d357e3dd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Mon, 26 Dec 2022 15:03:04 +0100 Subject: [PATCH] qmemman: use memory hotplug in qmemman If VM supports memory hotplug, start it with maxmem=memory and then use memory hotplug to (potentially) increase its memory. The hotplug maxmem needs to be communicated to libvirt->libxl via the <maxMemory> element, so Xen will reserve appropriate amount of memory for internal accounting (this currently is a patch in qubes package, not available upstream yet). Qmemman almost had all the needed parts - especially mem_set function already adjusts maxmem too. The missing part was letting it know actual maxmem - do that via xenstore (as qmemman uses xenstore heavily already). The use of memory hotplug can be enabled (or disabled) explicitly via 'memory-hotplug' feature, or detected based on 'memory-hotplug-supported' file in dom0-provided kernel dir, or 'supported-feature.memory-hotplug' for VM-provided kernel. 
Fixes QubesOS/qubes-issues#7956 --- qubes/qmemman/__init__.py | 19 +++++++++++++++---- qubes/vm/qubesvm.py | 30 ++++++++++++++++++++++++++++++ templates/libvirt/xen.xml | 3 +++ 3 files changed, 48 insertions(+), 4 deletions(-) diff --git a/qubes/qmemman/__init__.py b/qubes/qmemman/__init__.py index 9876db4c4..7dcdf68af 100644 --- a/qubes/qmemman/__init__.py +++ b/qubes/qmemman/__init__.py @@ -44,6 +44,7 @@ def __init__(self, id): self.mem_used = None # used memory, computed based on meminfo self.id = id # domain id self.last_target = 0 # the last memset target + self.use_hotplug = False # use memory hotplug for mem-set self.no_progress = False # no react to memset self.slow_memset_react = False # slow react to memset (after few # tries still above target) @@ -96,7 +97,7 @@ def get_free_xen_memory(self): # used - do not count it as "free", because domain is free to use it # at any time # assumption: self.refresh_memactual was called before - # (so domdict[id].memory_actual is up to date) + # (so domdict[id].memory_actual is up-to-date) assigned_but_unused = functools.reduce( lambda acc, dom: acc + max(0, dom.last_target-dom.memory_current), self.domdict.values(), @@ -127,9 +128,16 @@ def refresh_memactual(self): self.domdict[id].memory_current, self.domdict[id].last_target ) - self.domdict[id].memory_maximum = self.xs.read('', '/local/domain/%s/memory/static-max' % str(id)) - if self.domdict[id].memory_maximum: - self.domdict[id].memory_maximum = int(self.domdict[id].memory_maximum)*1024 + hotplug_max = self.xs.read( + '', '/local/domain/%s/memory/hotplug-max' % str(id)) + static_max = self.xs.read( + '', '/local/domain/%s/memory/static-max' % str(id)) + if hotplug_max: + self.domdict[id].memory_maximum = int(hotplug_max)*1024 + self.domdict[id].use_hotplug = True + elif static_max: + self.domdict[id].memory_maximum = int(static_max)*1024 + self.domdict[id].use_hotplug = False + else: + self.domdict[id].memory_maximum = self.ALL_PHYS_MEM # the previous line used to be @@ 
-174,6 +182,9 @@ def mem_set(self, id, val): # handle Xen view of memory self.xs.write('', '/local/domain/' + id + '/memory/target', str(int(val/1024 - 16 * 1024))) + if self.domdict[id].use_hotplug: + self.xs.write('', '/local/domain/' + id + '/memory/static-max', + str(int(val / 1024))) # this is called at the end of ballooning, when we have Xen free mem already # make sure that past mem_set will not decrease Xen free mem diff --git a/qubes/vm/qubesvm.py b/qubes/vm/qubesvm.py index bc0a9cd08..5e831dc2f 100644 --- a/qubes/vm/qubesvm.py +++ b/qubes/vm/qubesvm.py @@ -23,6 +23,7 @@ import asyncio import base64 import grp +import pathlib import re import os import os.path @@ -1635,6 +1636,31 @@ def is_memory_balancing_possible(self): return False return True + @property + def use_memory_hotplug(self): + """Use memory hotplug for memory balancing. + This is preferred if supported, because it has less initial overhead + and reduces Xen's attack surface. + This needs to be supported by the VM's kernel. + """ + feature = self.features.check_with_template('memory-hotplug', None) + if feature is not None: + return bool(feature) + # if not explicitly set, check if support is advertised + # for dom0-provided kernel - check there + # Do not enable automatically for HVM, as qemu isn't happy about that - + # emulated devices won't work (DMA issues?); but still allow enabling + # manually in that case. 
+ if self.kernel and self.virt_mode != 'hvm': + return (pathlib.Path(self.storage.kernels_dir) / + 'memory-hotplug-supported').exists() + # otherwise - check advertised VM's features + feature = self.features.check_with_template( + 'supported-feature.memory-hotplug', None) + if feature is not None: + return bool(feature) + return False + def request_memory(self, mem_required=None): if not qmemman_present: return None @@ -2287,6 +2313,10 @@ def create_qdb_entries(self): self.app.vmm.xs.set_permissions('', f"{xs_basedir}/memory/meminfo", [{'dom': self.xid}]) + if self.use_memory_hotplug: + self.app.vmm.xs.write('', + f"{xs_basedir}/memory/hotplug-max", + str(self.maxmem * 1024)) self.fire_event('domain-qdb-create') diff --git a/templates/libvirt/xen.xml b/templates/libvirt/xen.xml index cbdc34728..f60057781 100644 --- a/templates/libvirt/xen.xml +++ b/templates/libvirt/xen.xml @@ -5,6 +5,9 @@ {% if ((vm.virt_mode == 'hvm' and vm.devices['pci'].persistent() | list) or vm.maxmem == 0) -%} {{ vm.memory }} + {% elif vm.use_memory_hotplug %} + {{ vm.memory }} + {{ vm.maxmem }} {% else -%} {{ vm.maxmem }} {% endif -%}