Commit
Merge remote-tracking branch 'origin/master' into NAS-131317
aiden3c committed Oct 10, 2024
2 parents cbc3412 + 232447a commit fbd46de
Showing 13 changed files with 173 additions and 69 deletions.
4 changes: 2 additions & 2 deletions src/freenas/usr/local/share/truenas/eula.html
@@ -151,7 +151,7 @@
to time and at their sole discretion vary the terms and conditions of the maintenance and support
agreement based on different business environmental and personnel factors. For more information on
our Maintenance and Support contract, refer to
<a href="https://ixsystems.com/TrueNAS_SLA" rel="nofollow">https://ixsystems.com/TrueNAS_SLA</a>.
<a href="https://ixsystems.com/TrueNAS_SLA" target="_blank" rel="nofollow">https://ixsystems.com/TrueNAS_SLA</a>.
</p>
<p>
<strong>4.9 Force Majeure</strong> - iXsystems will not be deemed to be in default of any of the
@@ -205,7 +205,7 @@
TrueNAS Software may collect information relating to Your use of the Product, including information that has
been provided directly or indirectly through automated means. Usage of TrueNAS Software, geolocation
information, user login credentials, and device and operating system identification are allowed according
to iXsystems’ <a href="https://www.ixsystems.com/privacy-policy/" rel="nofollow">privacy policy</a>.
to iXsystems’ <a href="https://www.ixsystems.com/privacy-policy/" target="_blank" rel="nofollow">privacy policy</a>.
By accepting this Agreement and continuing to use the Product, you agree
that iXsystems may use any information provided through direct or indirect means in accordance with our
privacy policy and as permitted by applicable law, for purposes relating to management, compliance,
@@ -9,12 +9,19 @@
get_domain_info,
lookup_dc
)
from middlewared.utils.directoryservices.ad_constants import (
MAX_KERBEROS_START_TRIES
)
from middlewared.utils.directoryservices.constants import DSType
from middlewared.utils.directoryservices.krb5 import (
gss_get_current_cred,
kerberos_ticket,
)
from middlewared.utils.directoryservices.krb5_constants import krb5ccache
from middlewared.utils.directoryservices.krb5_error import (
KRB5Error,
KRB5ErrCode,
)
from time import sleep, time


@@ -26,8 +33,9 @@ def _ad_activate(self) -> None:

self.middleware.call_sync('service.stop', 'idmap')
self.middleware.call_sync('service.start', 'idmap', {'silent': False})
self.middleware.call_sync('kerberos.start')
# Wait for winbind to come online to provide some time for sysvol replication
self._ad_wait_wbclient()
self.middleware.call_sync('kerberos.start')

def _ad_wait_wbclient(self) -> None:
waited = 0
@@ -42,6 +50,33 @@ def _ad_wait_wbclient(self) -> None:

raise CallError('Timed out while waiting for domain to come online')

def _ad_wait_kerberos_start(self) -> None:
"""
After the initial AD join we reconfigure kerberos to find the KDC via DNS.
Unfortunately, depending on the AD environment it may take a significant
amount of time to replicate the new machine account to other domain
controllers. This means we have a retry loop on starting the kerberos
service.
"""
tries = 0
while tries < MAX_KERBEROS_START_TRIES:
try:
self.middleware.call_sync('kerberos.start')
return
except KRB5Error as krberr:
# KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN - account doesn't exist yet
# KRB5KDC_ERR_CLIENT_REVOKED - account locked (unlock maybe not replicated)
# KRB5KDC_ERR_PREAUTH_FAILED - bad password (password update not replicated)
if krberr.krb5_code not in (
KRB5ErrCode.KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN,
KRB5ErrCode.KRB5KDC_ERR_CLIENT_REVOKED,
KRB5ErrCode.KRB5KDC_ERR_PREAUTH_FAILED,
):
raise krberr

sleep(1)
tries += 1

def _ad_domain_info(self, domain: str, retry: bool = True) -> dict:
"""
Use libads from Samba to query information about the specified domain.
@@ -216,7 +251,11 @@ def _ad_post_join_actions(self, job: Job):
self.middleware.call_sync('activedirectory.register_dns')

# start up AD service
self._ad_activate()
try:
self._ad_activate()
except KRB5Error:
job.set_progress(65, 'Waiting for active directory to replicate machine account changes.')
self._ad_wait_kerberos_start()

def _ad_join_impl(self, job: Job, conf: dict):
"""
@@ -248,7 +287,12 @@ def _ad_join_impl(self, job: Job, conf: dict):
# we've now successfully joined AD and can proceed with post-join
# operations
try:
job.set_progress(60, 'Performing post-join actions')
return self._ad_post_join_actions(job)
except KRB5Error:
# if there's an actual unrecoverable kerberos error
# in our post-join actions then leaving AD will also fail
raise
except Exception as e:
# We failed to set up DNS / keytab cleanly
# roll back and present user with error
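Note on the retry loop introduced above: it only swallows KDC error codes that are expected while the freshly joined machine account replicates between domain controllers; anything else is re-raised immediately. A minimal standalone sketch of the same pattern, with hypothetical stand-ins for the middleware call and the KRB5 error codes:

import time

# Hypothetical stand-ins for KRB5ErrCode values and MAX_KERBEROS_START_TRIES.
RETRYABLE = {'C_PRINCIPAL_UNKNOWN', 'CLIENT_REVOKED', 'PREAUTH_FAILED'}
MAX_TRIES = 10


class KerberosStartError(Exception):
    def __init__(self, code):
        super().__init__(code)
        self.code = code


def start_with_retries(start_kerberos, max_tries=MAX_TRIES, delay=1):
    """Retry kerberos startup while the KDC reports errors that usually mean
    the machine account has not replicated yet; re-raise anything else."""
    for _ in range(max_tries):
        try:
            return start_kerberos()
        except KerberosStartError as err:
            if err.code not in RETRYABLE:
                raise
            time.sleep(delay)
    raise TimeoutError('kerberos did not start within the retry budget')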
@@ -15,13 +15,17 @@ def get_ses_enclosure_status(bsg_path):
logger.error('Error querying enclosure status for %r', bsg_path, exc_info=True)


def get_ses_enclosures(dmi):
def get_ses_enclosures(dmi, asdict=True):
rv = list()
with suppress(FileNotFoundError):
for i in Path('/sys/class/enclosure').iterdir():
bsg = f'/dev/bsg/{i.name}'
if (status := get_ses_enclosure_status(bsg)):
sg = next((i / 'device/scsi_generic').iterdir())
rv.append(Enclosure(bsg, f'/dev/{sg.name}', dmi, status).asdict())
enc = Enclosure(bsg, f'/dev/{sg.name}', dmi, status)
if asdict:
rv.append(enc.asdict())
else:
rv.append(enc)

return rv
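
The new asdict flag above lets callers choose between serializable dicts (the previous behaviour) and live Enclosure objects, which the reworked detect() below relies on. A toy sketch of the same toggle, using a dataclass stand-in rather than the real Enclosure class:

from dataclasses import dataclass, asdict


@dataclass
class Enclosure:
    # Toy stand-in for the real Enclosure class; not the middleware type.
    bsg: str
    sg: str
    status: str


def get_enclosures(raw, as_dict=True):
    """Return dicts by default, or the objects themselves when as_dict=False,
    mirroring the asdict parameter added to get_ses_enclosures()."""
    enclosures = [Enclosure(*item) for item in raw]
    return [asdict(e) for e in enclosures] if as_dict else enclosures


raw = [('/dev/bsg/0:0:0:0', '/dev/sg0', 'OK')]
print(get_enclosures(raw))                  # list of dicts, safe to serialize
print(get_enclosures(raw, as_dict=False))   # list of Enclosure objects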
74 changes: 26 additions & 48 deletions src/middlewared/middlewared/plugins/failover_/detect_enclosure.py
@@ -3,20 +3,17 @@
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions

import pathlib
import subprocess
import re

from pyudev import Context

from ixhardware import PLATFORM_PREFIXES
from libsg3.ses import EnclosureDevice

from middlewared.service import Service
from middlewared.utils.functools_ import cache

from .ha_hardware import HA_HARDWARE

ENCLOSURES_DIR = '/sys/class/enclosure/'
from middlewared.plugins.enclosure_.ses_enclosures2 import get_ses_enclosures


class EnclosureDetectionService(Service):
@@ -90,52 +87,33 @@ def detect(self):
# down this path.
return HARDWARE, NODE

for enc in self.middleware.call_sync('enclosure.list_ses_enclosures'):
try:
info = EnclosureDevice(enc).get_element_descriptor()
except OSError:
self.logger.warning('Error querying element descriptor page for %r', enc, exc_info=True)
continue
else:
if re.search(HA_HARDWARE.ZSERIES_ENCLOSURE.value, info):
# Z-series Hardware (Echostream)
HARDWARE = 'ECHOSTREAM'
reg = re.search(HA_HARDWARE.ZSERIES_NODE.value, info)
NODE = reg.group(1)
if NODE:
for enc in get_ses_enclosures(product, False):
if enc.is_mseries:
HARDWARE = 'ECHOWARP'
if enc.product == '4024Sp':
return HARDWARE, 'A'
elif enc.product == '4024Ss':
return HARDWARE, 'B'
elif enc.is_xseries:
HARDWARE = 'PUMA'
esce = enc.elements.get('Enclosure Services Controller Electronics', {})
for i in esce.values():
if i['descriptor'].find('ESCE A_') != -1:
node = 'A'
elif i['descriptor'].find('ESCE B_') != -1:
node = 'B'
else:
break
elif re.search(HA_HARDWARE.XSERIES_ENCLOSURE.value, info):
# X-series Hardware (PUMA)
HARDWARE = 'PUMA'

sas_addr = ''
with open(f'{ENCLOSURES_DIR}{enc.split("/")[-1]}/device/sas_address') as f:
# We need to get the SAS address of the SAS expander first
sas_addr = f.read().strip()

# We then cast the SES address (deduced from SES VPD pages)
# to an integer and subtract 1, then cast it back to hexadecimal.
# We then check whether the SAS expander's SAS address matches
# the SAS expander's SES address.
if (reg := re.search(HA_HARDWARE.XSERIES_NODEA.value, info)) is not None:
ses_addr = hex(int(reg.group(1), 16) - 1)
if ses_addr == sas_addr:
NODE = 'A'
break

if (reg := re.search(HA_HARDWARE.XSERIES_NODEB.value, info)) is not None:
ses_addr = hex(int(reg.group(1), 16) - 1)
if ses_addr == sas_addr:
NODE = 'B'
break
elif (reg := re.search(HA_HARDWARE.MSERIES_ENCLOSURE.value, info)) is not None:
# M-series hardware (Echowarp)
HARDWARE = 'ECHOWARP'
if reg.group(2) == 'p':
NODE = 'A'
break
elif reg.group(2) == 's':
NODE = 'B'
break
# We then check whether the SAS expander's SAS address is the same
# as the SAS expander's SES address.
ses_addr = hex(int(i['descriptor'][7:].strip(), 16) - 1)
sas_addr = pathlib.Path(
f'/sys/class/enclosure/{enc.pci}/device/sas_address'
).read_text().strip()
if ses_addr == sas_addr:
return HARDWARE, node

return HARDWARE, NODE
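
The node detection above reduces to one comparison: the address parsed from the SES element descriptor, minus one, should equal the SAS expander address read from sysfs. A small sketch of that arithmetic with made-up addresses:

def matches_local_expander(ses_descriptor_addr: str, sysfs_sas_address: str) -> bool:
    """Subtract one from the SES-reported address and compare it against the
    SAS expander address from /sys/class/enclosure/<id>/device/sas_address."""
    ses_addr = hex(int(ses_descriptor_addr, 16) - 1)
    return ses_addr == sysfs_sas_address


# Made-up addresses, purely for illustration.
print(matches_local_expander('0x5000ccab1234567a', '0x5000ccab12345679'))  # True
print(matches_local_expander('0x5000ccab1234567a', '0x5000ccab12345670'))  # False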
49 changes: 49 additions & 0 deletions src/middlewared/middlewared/plugins/failover_/event.py
@@ -16,6 +16,7 @@
from middlewared.service import Service, job, accepts
from middlewared.service_exception import CallError
from middlewared.schema import Dict, Bool, Int
from middlewared.plugins.docker.state_utils import Status
# from middlewared.plugins.failover_.zpool_cachefile import ZPOOL_CACHE_FILE
from middlewared.plugins.failover_.event_exceptions import AllZpoolsFailedToImport, IgnoreFailoverEvent, FencedError
from middlewared.plugins.failover_.scheduled_reboot_alert import WATCHDOG_ALERT_FILE
@@ -737,6 +738,8 @@ def vrrp_master(self, job, fobj, ifname, event):
self.run_call('kmip.initialize_keys')
logger.info('Done syncing encryption keys with KMIP server')

self.start_apps()

logger.info('Migrating interface information (if required)')
self.run_call('interface.persist_link_addresses')
logger.info('Done migrating interface information (if required)')
@@ -796,6 +799,14 @@ def vrrp_backup(self, job, fobj, ifname, event):

logger.warning('Entering BACKUP on "%s".', ifname)

# We try to give docker some time to stop gracefully before the zpools are forcefully
# exported. This is to avoid any potential data corruption.
stop_docker_thread = threading.Thread(
target=self.stop_apps,
name='failover_stop_docker',
)
stop_docker_thread.start()

# We stop netdata before exporting pools because otherwise we might have erroneous stuff
# getting logged and causing spam
logger.info('Stopping reporting metrics')
@@ -930,6 +941,44 @@ def vrrp_backup(self, job, fobj, ifname, event):

return self.FAILOVER_RESULT

def start_apps(self):
pool = self.run_call('docker.config')['pool']
if not pool:
self.middleware.call_sync('docker.state.set_status', Status.UNCONFIGURED.value)
logger.info('Skipping starting apps as they are not configured')
return

logger.info('Going to initialize apps plugin as %r pool is configured for apps', pool)
logger.info('Mounting relevant docker datasets')
try:
self.run_call('docker.fs_manage.mount')
except Exception:
self.middleware.call_sync('docker.state.set_status', Status.FAILED.value, 'Failed to mount docker datasets')
logger.error('Failed to mount docker datasets', exc_info=True)
return
else:
logger.info('Mounted docker datasets successfully')

logger.info('Starting docker service')
try:
self.run_call('docker.state.start_service')
except Exception:
logger.error('Failed to start docker service', exc_info=True)
else:
logger.info('Docker service started successfully')

def stop_apps(self):
if not self.middleware.call_sync('docker.config')['dataset']:
return

logger.info('Trying to gracefully stop docker service')
try:
self.run_call('service.stop', 'docker')
except Exception:
logger.error('Failed to stop docker service gracefully', exc_info=True)
else:
logger.info('Docker service stopped gracefully')


async def vrrp_fifo_hook(middleware, data):
ifname = data['ifname']
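The BACKUP path above stops docker from a separate thread so the rest of the transition (stopping metrics, exporting pools) is not blocked waiting for a graceful shutdown. A minimal sketch of that fire-and-forget pattern, with a hypothetical stop routine standing in for run_call('service.stop', 'docker'):

import logging
import threading

logger = logging.getLogger('failover')


def stop_docker():
    # Hypothetical stand-in for run_call('service.stop', 'docker').
    logger.info('Trying to gracefully stop docker service')


def enter_backup():
    # Kick off the graceful docker stop in the background ...
    threading.Thread(target=stop_docker, name='failover_stop_docker').start()
    # ... and carry on with the rest of the BACKUP transition immediately.
    logger.info('Continuing BACKUP transition')


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    enter_backup()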
@@ -8,7 +8,7 @@
from middlewared.service import CallError, job, Service

from .migrate_config_utils import migrate_chart_release_config
from .utils import get_sorted_backups
from .utils import get_k8s_ds, get_sorted_backups


logger = logging.getLogger('app_migrations')
@@ -80,6 +80,12 @@ def migrate(self, job, kubernetes_pool, options):
if not backup_config['releases']:
raise CallError(f'No old apps found in {options["backup_name"]!r} backup which can be migrated')

k8s_ds_encrypted = bool(self.middleware.call_sync(
'zfs.dataset.get_instance',
get_k8s_ds(kubernetes_pool),
{'extra': {'properties': ['encryption'], 'retrieve_children': False}}
)['encrypted'])

docker_config = self.middleware.call_sync('docker.config')
if docker_config['pool'] and docker_config['pool'] != kubernetes_pool:
# For good measure we stop docker service and unset docker pool if any configured
@@ -176,6 +182,8 @@ def migrate(self, job, kubernetes_pool, options):
self.middleware.call_sync('app.schema.action.update_volumes', chart_release['release_name'], [])

try:
if k8s_ds_encrypted:
raise CallError('Encrypted ix-volumes are not supported for migration')
app_volume_ds = get_app_parent_volume_ds_name(
os.path.join(kubernetes_pool, 'ix-apps'), chart_release['release_name']
)
@@ -190,7 +198,10 @@
self.middleware.call_sync('zfs.dataset.promote', destination_ds)
self.middleware.call_sync('zfs.dataset.mount', destination_ds)
except CallError as e:
release_config['error'] = f'Failed to clone and promote ix-volumes: {e}'
if k8s_ds_encrypted:
release_config['error'] = 'App is using encrypted ix-volumes which are not supported for migration'
else:
release_config['error'] = f'Failed to clone and promote ix-volumes: {e}'
# We do this to make sure it does not show up as installed in the UI
shutil.rmtree(get_installed_app_path(chart_release['release_name']), ignore_errors=True)
else:
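The migration above now checks the encryption property of the old kubernetes dataset up front and fails a release early when its ix-volumes are encrypted, instead of letting the clone/promote step fail later. Outside the middleware, the same check could be approximated with the zfs CLI; a hedged sketch (assumes the zfs command is available and the dataset name is valid):

import subprocess


def dataset_is_encrypted(dataset: str) -> bool:
    """Best-effort check of the ZFS 'encryption' property via the zfs CLI;
    the middleware itself uses zfs.dataset.get_instance instead."""
    value = subprocess.run(
        ['zfs', 'get', '-H', '-o', 'value', 'encryption', dataset],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    return value != 'off'


# The migration fails a release early when this is True, mirroring the
# 'Encrypted ix-volumes are not supported for migration' error above.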
6 changes: 2 additions & 4 deletions src/middlewared/middlewared/plugins/pool_/dataset.py
@@ -294,10 +294,10 @@ async def __common_validation(self, verrors, schema, data, mode, parent=None, cu
to_check = {'acltype': None, 'aclmode': None}

if mode == 'UPDATE':
# Prevent users from changing acltype or xattr settings underneath an active SMB share
# Prevent users from changing acltype settings underneath an active SMB share
# If this dataset hosts an SMB share, then prompt the user to first delete the share,
# make the dataset change, then recreate the share.
keys = ('acltype', 'xattr')
keys = ('acltype',)
if any([data.get(key) for key in keys]):
ds_attachments = await self.middleware.call('pool.dataset.attachments', data['name'])
if smb_attachments := [share for share in ds_attachments if share['type'] == "SMB Share"]:
@@ -473,7 +473,6 @@ async def __common_validation(self, verrors, schema, data, mode, parent=None, cu
Inheritable(Str('aclmode', enum=['PASSTHROUGH', 'RESTRICTED', 'DISCARD']), has_default=False),
Inheritable(Str('acltype', enum=['OFF', 'NFSV4', 'POSIX']), has_default=False),
Str('share_type', default='GENERIC', enum=['GENERIC', 'MULTIPROTOCOL', 'NFS', 'SMB', 'APPS']),
Inheritable(Str('xattr', default='SA', enum=['ON', 'SA'])),
Ref('encryption_options'),
Bool('encryption', default=False),
Bool('inherit_encryption', default=True),
@@ -743,7 +742,6 @@ async def do_create(self, data):
('sync', None, str.lower, True),
('volblocksize', None, None, False),
('volsize', None, lambda x: str(x), False),
('xattr', None, str.lower, True),
('special_small_block_size', 'special_small_blocks', None, True),
):
if i not in data or (inheritable and data[i] == 'INHERIT'):
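With xattr removed from the schema, the UPDATE-time guard above now only blocks acltype changes while an SMB share is attached to the dataset. A small sketch of that validation shape, using plain dicts instead of the middleware's verrors/attachments machinery:

def validate_dataset_update(data: dict, attachments: list) -> list:
    """Reject acltype changes while an SMB share exports the dataset,
    mirroring the guard in __common_validation above."""
    errors = []
    if data.get('acltype'):
        smb_shares = [a for a in attachments if a.get('type') == 'SMB Share']
        if smb_shares:
            errors.append(
                'acltype cannot be changed while an SMB share exports this dataset; '
                'delete the share, change the dataset, then recreate the share'
            )
    return errors


print(validate_dataset_update({'acltype': 'NFSV4'}, [{'type': 'SMB Share'}]))
print(validate_dataset_update({'acltype': 'NFSV4'}, []))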
9 changes: 5 additions & 4 deletions src/middlewared/middlewared/plugins/truenas.py
@@ -57,10 +57,11 @@ def get_eula(self):
"""
Returns the TrueNAS End-User License Agreement (EULA).
"""
if not os.path.exists(EULA_FILE):
return
with open(EULA_FILE, 'r', encoding='utf8') as f:
return f.read()
try:
with open(EULA_FILE, 'r', encoding='utf8') as f:
return f.read()
except FileNotFoundError:
pass

@accepts(roles=['READONLY_ADMIN'])
@returns(Bool('system_eula_accepted'))
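get_eula() above switches from an os.path.exists check to catching FileNotFoundError, which removes the race between checking for the file and opening it. The same EAFP pattern in isolation, with a placeholder path:

def read_optional_file(path: str, encoding: str = 'utf8'):
    """Return the file contents, or None if the file does not exist.
    Catching FileNotFoundError avoids the check-then-open race."""
    try:
        with open(path, 'r', encoding=encoding) as f:
            return f.read()
    except FileNotFoundError:
        return None


# Placeholder path; returns None when the file is absent.
print(read_optional_file('/tmp/does-not-exist.html'))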