diff --git a/debian/debian/control b/debian/debian/control
index 47559bd9038f5..79adc1a5c3b3a 100644
--- a/debian/debian/control
+++ b/debian/debian/control
@@ -21,7 +21,6 @@ Depends: acl,
kdump-tools,
keepalived,
ifenslave,
- pcm,
libnginx-mod-http-uploadprogress,
libvirt-daemon-system,
man-db,
diff --git a/src/freenas/usr/local/bin/snmp-agent.py b/src/freenas/usr/local/bin/snmp-agent.py
index d56882f117843..daa6af5f7f2ed 100755
--- a/src/freenas/usr/local/bin/snmp-agent.py
+++ b/src/freenas/usr/local/bin/snmp-agent.py
@@ -2,7 +2,7 @@
import threading
import time
import contextlib
-import pathlib
+import os
import libzfs
import netsnmpagent
@@ -413,11 +413,9 @@ def get_list_of_zvols():
zvols = set()
root_dir = '/dev/zvol/'
with contextlib.suppress(FileNotFoundError): # no zvols
- for zpool in pathlib.Path(root_dir).iterdir():
- for zvol in filter(lambda x: '@' not in x.name, zpool.iterdir()):
- zvol_normalized = zvol.as_posix().removeprefix(root_dir)
- zvol_normalized = zvol_normalized.replace('+', ' ')
- zvols.add(zvol_normalized)
+ for dir_path, unused_dirs, files in os.walk(root_dir):
+ for file in filter(lambda x: '@' not in x, files):
+ zvols.add(os.path.join(dir_path, file).removeprefix(root_dir).replace('+', ' '))
return list(zvols)
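
For reference, the rewritten loop above recovers ZFS zvol names from their /dev/zvol device links by stripping the root prefix and undoing the '+' encoding those links use for spaces. A minimal standalone sketch of that normalization (the device path is hypothetical):

```python
# Minimal sketch of the name normalization done in get_list_of_zvols():
# strip the '/dev/zvol/' prefix and undo the '+'-for-space encoding used by
# zvol device links. The example path below is hypothetical.
root_dir = '/dev/zvol/'
device_path = '/dev/zvol/tank/my+data+set'

zvol_name = device_path.removeprefix(root_dir).replace('+', ' ')
assert zvol_name == 'tank/my data set'
```
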
diff --git a/src/freenas/usr/local/share/pysnmp/mibs/TRUENAS-MIB.py b/src/freenas/usr/local/share/pysnmp/mibs/TRUENAS-MIB.py
index 9e976d5772ab9..c7c1bcdeca1d8 100644
--- a/src/freenas/usr/local/share/pysnmp/mibs/TRUENAS-MIB.py
+++ b/src/freenas/usr/local/share/pysnmp/mibs/TRUENAS-MIB.py
@@ -1,6 +1,6 @@
# PySNMP SMI module. Autogenerated from smidump -f python TRUENAS-MIB
-# by libsmi2pysnmp-0.1.3 at Fri Aug 18 13:42:49 2023,
-# Python version sys.version_info(major=2, minor=7, micro=17, releaselevel='final', serial=0)
+# by libsmi2pysnmp-0.1.3 at Wed Jul 24 12:51:26 2024,
+# Python version sys.version_info(major=3, minor=11, micro=2, releaselevel='final', serial=0)
# Imports
@@ -13,7 +13,7 @@
# Types
class AlertLevelType(Integer):
- subtypeSpec = Integer.subtypeSpec+SingleValueConstraint(1,2,3,5,7,4,6,)
+ subtypeSpec = Integer.subtypeSpec+SingleValueConstraint(1,2,3,4,5,6,7,)
namedValues = NamedValues(("info", 1), ("notice", 2), ("warning", 3), ("error", 4), ("critical", 5), ("alert", 6), ("emergency", 7), )
@@ -80,11 +80,11 @@ class AlertLevelType(Integer):
zfsArcC = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsArcC.setDescription("")
zfsArcMissPercent = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 8), DisplayString()).setMaxAccess("readonly")
-if mibBuilder.loadTexts: zfsArcMissPercent.setDescription("Arc Miss Percentage.\n(Note: Floating precision sent across SNMP as a String")
+if mibBuilder.loadTexts: zfsArcMissPercent.setDescription("Arc Miss Percentage.\nNote: Floating precision sent across SNMP as a String")
zfsArcCacheHitRatio = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 9), DisplayString()).setMaxAccess("readonly")
-if mibBuilder.loadTexts: zfsArcCacheHitRatio.setDescription("Arc Cache Hit Ration Percentage.\n(Note: Floating precision sent across SNMP as a String")
+if mibBuilder.loadTexts: zfsArcCacheHitRatio.setDescription("Arc Cache Hit Ration Percentage.\nNote: Floating precision sent across SNMP as a String")
zfsArcCacheMissRatio = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 3, 10), DisplayString()).setMaxAccess("readonly")
-if mibBuilder.loadTexts: zfsArcCacheMissRatio.setDescription("Arc Cache Miss Ration Percentage.\n(Note: Floating precision sent across SNMP as a String")
+if mibBuilder.loadTexts: zfsArcCacheMissRatio.setDescription("Arc Cache Miss Ration Percentage.\nNote: Floating precision sent across SNMP as a String")
l2arc = MibIdentifier((1, 3, 6, 1, 4, 1, 50536, 1, 4))
zfsL2ArcHits = MibScalar((1, 3, 6, 1, 4, 1, 50536, 1, 4, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zfsL2ArcHits.setDescription("")
@@ -127,7 +127,7 @@ class AlertLevelType(Integer):
# Notifications
-alert = NotificationType((1, 3, 6, 1, 4, 1, 50536, 2, 1, 1)).setObjects(*(("TRUENAS-MIB", "alertMessage"), ("TRUENAS-MIB", "alertLevel"), ("TRUENAS-MIB", "alertId"), ) )
+alert = NotificationType((1, 3, 6, 1, 4, 1, 50536, 2, 1, 1)).setObjects(*(("TRUENAS-MIB", "alertId"), ("TRUENAS-MIB", "alertLevel"), ("TRUENAS-MIB", "alertMessage"), ) )
if mibBuilder.loadTexts: alert.setDescription("An alert raised")
alertCancellation = NotificationType((1, 3, 6, 1, 4, 1, 50536, 2, 1, 2)).setObjects(*(("TRUENAS-MIB", "alertId"), ) )
if mibBuilder.loadTexts: alertCancellation.setDescription("An alert cancelled")
diff --git a/src/freenas/usr/local/share/snmp/mibs/TRUENAS-MIB.txt b/src/freenas/usr/local/share/snmp/mibs/TRUENAS-MIB.txt
index 6e12bec5b9935..994705911160a 100644
--- a/src/freenas/usr/local/share/snmp/mibs/TRUENAS-MIB.txt
+++ b/src/freenas/usr/local/share/snmp/mibs/TRUENAS-MIB.txt
@@ -293,7 +293,7 @@ zfsArcMissPercent OBJECT-TYPE
STATUS current
DESCRIPTION
"Arc Miss Percentage.
- (Note: Floating precision sent across SNMP as a String"
+ Note: Floating precision sent across SNMP as a String"
::= { arc 8 }
zfsArcCacheHitRatio OBJECT-TYPE
@@ -302,7 +302,7 @@ zfsArcCacheHitRatio OBJECT-TYPE
STATUS current
DESCRIPTION
"Arc Cache Hit Ration Percentage.
- (Note: Floating precision sent across SNMP as a String"
+ Note: Floating precision sent across SNMP as a String"
::= { arc 9 }
zfsArcCacheMissRatio OBJECT-TYPE
@@ -311,7 +311,7 @@ zfsArcCacheMissRatio OBJECT-TYPE
STATUS current
DESCRIPTION
"Arc Cache Miss Ration Percentage.
- (Note: Floating precision sent across SNMP as a String"
+ Note: Floating precision sent across SNMP as a String"
::= { arc 10 }
zfsL2ArcHits OBJECT-TYPE
diff --git a/src/middlewared/middlewared/alert/base.py b/src/middlewared/middlewared/alert/base.py
index f3582d6f0623d..62a66295724e1 100644
--- a/src/middlewared/middlewared/alert/base.py
+++ b/src/middlewared/middlewared/alert/base.py
@@ -120,6 +120,16 @@ async def delete(self, alerts, query):
"""
raise NotImplementedError
+ async def load(self, alerts):
+ """
+ This is called on system startup. Returns only those `alerts` that are still applicable to this system (i.e.,
+        corresponding resources still exist).
+
+ :param alerts: all the existing alerts of the class
+ :return: `alerts` that should exist on this system.
+ """
+ return alerts
+
class SimpleOneShotAlertClass(OneShotAlertClass):
"""
diff --git a/src/middlewared/middlewared/alert/source/datasets.py b/src/middlewared/middlewared/alert/source/datasets.py
index 0906e4cf6b2a4..3f2f864f38599 100644
--- a/src/middlewared/middlewared/alert/source/datasets.py
+++ b/src/middlewared/middlewared/alert/source/datasets.py
@@ -22,9 +22,11 @@ async def check(self):
unencrypted_datasets = []
for dataset in await self.middleware.call('pool.dataset.query', [['encrypted', '=', True]]):
for child in dataset['children']:
- if child['name'] == f'{child["pool"]}/ix-applications' or child['name'].startswith(
- f'{child["pool"]}/ix-applications/'
- ):
+ if child['name'] in (
+ f'{child["pool"]}/ix-applications', f'{child["pool"]}/ix-apps'
+ ) or child['name'].startswith((
+ f'{child["pool"]}/ix-applications/', f'{child["pool"]}/ix-apps/'
+ )):
continue
if not child['encrypted']:
diff --git a/src/middlewared/middlewared/plugins/activedirectory.py b/src/middlewared/middlewared/plugins/activedirectory.py
index 8eadbed3db649..63bf10d1248fc 100644
--- a/src/middlewared/middlewared/plugins/activedirectory.py
+++ b/src/middlewared/middlewared/plugins/activedirectory.py
@@ -143,16 +143,20 @@ async def update_netbios_data(self, old, new):
@private
async def common_validate(self, new, old, verrors):
- try:
- if not (await self.middleware.call('activedirectory.netbiosname_is_ours', new['netbiosname'], new['domainname'], new['dns_timeout'])):
- verrors.add(
- 'activedirectory_update.netbiosname',
- f'NetBIOS name [{new["netbiosname"]}] appears to be in use by another computer in Active Directory DNS. '
- 'Further investigation and DNS corrections will be required prior to using the aforementioned name to '
- 'join Active Directory.'
- )
- except CallError:
- pass
+ if new['enable']:
+ try:
+ if not (await self.middleware.call(
+ 'activedirectory.netbiosname_is_ours',
+ new['netbiosname'], new['domainname'], new['dns_timeout'])
+ ):
+ verrors.add(
+ 'activedirectory_update.netbiosname',
+ f'NetBIOS name [{new["netbiosname"]}] appears to be in use by another computer in Active Directory DNS. '
+ 'Further investigation and DNS corrections will be required prior to using the aforementioned name to '
+ 'join Active Directory.'
+ )
+ except CallError:
+ pass
if new['kerberos_realm'] and new['kerberos_realm'] != old['kerberos_realm']:
realm = await self.middleware.call('kerberos.realm.query', [("id", "=", new['kerberos_realm'])])
diff --git a/src/middlewared/middlewared/plugins/alert.py b/src/middlewared/middlewared/plugins/alert.py
index 70bb2db73db3e..fef3e97e41e9a 100644
--- a/src/middlewared/middlewared/plugins/alert.py
+++ b/src/middlewared/middlewared/plugins/alert.py
@@ -239,6 +239,8 @@ async def initialize(self, load=True):
self.alerts = []
if load:
+ alerts_uuids = set()
+ alerts_by_classes = defaultdict(list)
for alert in await self.middleware.call("datastore.query", "system.alert"):
del alert["id"]
@@ -259,8 +261,15 @@ async def initialize(self, load=True):
alert = Alert(**alert)
- if not any(a.uuid == alert.uuid for a in self.alerts):
- self.alerts.append(alert)
+ if alert.uuid not in alerts_uuids:
+ alerts_uuids.add(alert.uuid)
+ alerts_by_classes[alert.klass.__name__].append(alert)
+
+ for alerts in alerts_by_classes.values():
+ if isinstance(alerts[0].klass, OneShotAlertClass):
+ alerts = await alerts[0].klass.load(alerts)
+
+ self.alerts.extend(alerts)
else:
await self.flush_alerts()
diff --git a/src/middlewared/middlewared/plugins/apps/app_scale.py b/src/middlewared/middlewared/plugins/apps/app_scale.py
index 0ea446b4e08db..e692c28db8ba8 100644
--- a/src/middlewared/middlewared/plugins/apps/app_scale.py
+++ b/src/middlewared/middlewared/plugins/apps/app_scale.py
@@ -1,5 +1,5 @@
from middlewared.schema import accepts, Str, returns
-from middlewared.service import CallError, job, Service
+from middlewared.service import job, Service
from .compose_utils import compose_action
@@ -10,7 +10,7 @@ class Config:
namespace = 'app'
cli_namespace = 'app'
- @accepts(Str('app_name'))
+ @accepts(Str('app_name'), roles=['APPS_WRITE'])
@returns()
@job(lock=lambda args: f'app_stop_{args[0]}')
def stop(self, job, app_name):
@@ -24,7 +24,7 @@ def stop(self, job, app_name):
)
job.set_progress(100, f'Stopped {app_name!r} app')
- @accepts(Str('app_name'))
+ @accepts(Str('app_name'), roles=['APPS_WRITE'])
@returns()
@job(lock=lambda args: f'app_start_{args[0]}')
def start(self, job, app_name):
@@ -36,7 +36,7 @@ def start(self, job, app_name):
compose_action(app_name, app_config['version'], 'up', force_recreate=True, remove_orphans=True)
job.set_progress(100, f'Started {app_name!r} app')
- @accepts(Str('app_name'))
+ @accepts(Str('app_name'), roles=['APPS_WRITE'])
@returns()
@job(lock=lambda args: f'app_redeploy_{args[0]}')
async def redeploy(self, job, app_name):
@@ -44,8 +44,4 @@ async def redeploy(self, job, app_name):
Redeploy `app_name` app.
"""
app = await self.middleware.call('app.get_instance', app_name)
- stop_job = await self.middleware.call('app.stop', app_name)
- await stop_job.wait()
- if stop_job.error:
- raise CallError(f'Failed to redeploy app: {stop_job.error}')
return await self.middleware.call('app.update_internal', job, app, {'values': {}}, 'Redeployment')
diff --git a/src/middlewared/middlewared/plugins/apps/crud.py b/src/middlewared/middlewared/plugins/apps/crud.py
index 9333203afb743..09dd55e234839 100644
--- a/src/middlewared/middlewared/plugins/apps/crud.py
+++ b/src/middlewared/middlewared/plugins/apps/crud.py
@@ -23,6 +23,7 @@ class Config:
namespace = 'app'
datastore_primary_key_type = 'string'
cli_namespace = 'app'
+ role_prefix = 'APPS'
ENTRY = Dict(
'app_query',
@@ -106,7 +107,7 @@ def query(self, app, filters, options):
return filter_list(apps, filters, options)
- @accepts(Str('app_name'))
+ @accepts(Str('app_name'), roles=['APPS_READ'])
@returns(Dict('app_config', additional_attrs=True))
def config(self, app_name):
"""
@@ -161,14 +162,18 @@ def do_create(self, job, data):
if version not in complete_app_details['versions']:
raise CallError(f'Version {version} not found in {data["catalog_app"]} app', errno=errno.ENOENT)
+ return self.create_internal(job, app_name, version, data['values'], complete_app_details)
+
+ @private
+ def create_internal(self, job, app_name, version, user_values, complete_app_details, dry_run=False):
app_version_details = complete_app_details['versions'][version]
self.middleware.call_sync('catalog.version_supported_error_check', app_version_details)
# The idea is to validate the values provided first and if it passes our validation test, we
# can move forward with setting up the datasets and installing the catalog item
new_values = self.middleware.call_sync(
- 'app.schema.normalize_and_validate_values', app_version_details, data['values'], False,
- get_installed_app_path(app_name)
+ 'app.schema.normalize_and_validate_values', app_version_details, user_values, False,
+ get_installed_app_path(app_name), None, dry_run is False,
)
job.set_progress(25, 'Initial Validation completed')
@@ -183,14 +188,15 @@ def do_create(self, job, data):
app_version_details = self.middleware.call_sync(
'catalog.app_version_details', get_installed_app_version_path(app_name, version)
)
- new_values = add_context_to_values(app_name, new_values, install=True)
+ new_values = add_context_to_values(app_name, new_values, app_version_details['app_metadata'], install=True)
update_app_config(app_name, version, new_values)
update_app_metadata(app_name, app_version_details)
job.set_progress(60, 'App installation in progress, pulling images')
- compose_action(app_name, version, 'up', force_recreate=True, remove_orphans=True)
+ if dry_run is False:
+ compose_action(app_name, version, 'up', force_recreate=True, remove_orphans=True)
except Exception as e:
- job.set_progress(80, f'Failure occurred while installing {data["app_name"]!r}, cleaning up')
+ job.set_progress(80, f'Failure occurred while installing {app_name!r}, cleaning up')
for method, args, kwargs in (
(compose_action, (app_name, version, 'down'), {'remove_orphans': True}),
(shutil.rmtree, (get_installed_app_path(app_name),), {}),
@@ -200,9 +206,10 @@ def do_create(self, job, data):
raise e from None
else:
- self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True)
- job.set_progress(100, f'{data["app_name"]!r} installed successfully')
- return self.get_instance__sync(app_name)
+ if dry_run is False:
+ self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True)
+ job.set_progress(100, f'{app_name!r} installed successfully')
+ return self.get_instance__sync(app_name)
@accepts(
Str('app_name'),
@@ -239,7 +246,7 @@ def update_internal(self, job, app, data, progress_keyword='Update'):
job.set_progress(25, 'Initial Validation completed')
- new_values = add_context_to_values(app_name, new_values, update=True)
+ new_values = add_context_to_values(app_name, new_values, app['metadata'], update=True)
update_app_config(app_name, app['version'], new_values)
update_app_metadata_for_portals(app_name, app['version'])
job.set_progress(60, 'Configuration updated, updating docker resources')
diff --git a/src/middlewared/middlewared/plugins/apps/ix_apps/lifecycle.py b/src/middlewared/middlewared/plugins/apps/ix_apps/lifecycle.py
index 503cb37e19be3..ed91c48962f6e 100644
--- a/src/middlewared/middlewared/plugins/apps/ix_apps/lifecycle.py
+++ b/src/middlewared/middlewared/plugins/apps/ix_apps/lifecycle.py
@@ -53,12 +53,13 @@ def get_action_context(app_name: str) -> dict[str, typing.Any]:
'is_upgrade': False,
'upgrade_metadata': {},
'app_name': app_name,
+ 'app_metadata': {},
})
def add_context_to_values(
- app_name: str, values: dict[str, typing.Any], *, install: bool = False, update: bool = False, upgrade: bool = False,
- upgrade_metadata: dict[str, typing.Any] = None, rollback: bool = False,
+ app_name: str, values: dict[str, typing.Any], app_metadata: dict, *, install: bool = False, update: bool = False,
+ upgrade: bool = False, upgrade_metadata: dict[str, typing.Any] = None, rollback: bool = False,
) -> dict[str, typing.Any]:
assert install or update or upgrade or rollback, 'At least one of install, update, rollback or upgrade must be True'
assert sum([install, rollback, update, upgrade]) <= 1, 'Only one of install, update, or upgrade can be True.'
@@ -77,6 +78,7 @@ def add_context_to_values(
for operation, _ in filter(lambda i: i[1], operation_map.items()):
action_context.update({
'operation': operation,
+ 'app_metadata': app_metadata,
f'is_{operation.lower()}': True,
**({'upgrade_metadata': upgrade_metadata} if operation == 'UPGRADE' else {})
})
diff --git a/src/middlewared/middlewared/plugins/apps/ix_apps/path.py b/src/middlewared/middlewared/plugins/apps/ix_apps/path.py
index 30adedc4a9e8f..effd7003271d0 100644
--- a/src/middlewared/middlewared/plugins/apps/ix_apps/path.py
+++ b/src/middlewared/middlewared/plugins/apps/ix_apps/path.py
@@ -15,6 +15,10 @@ def get_app_parent_config_path() -> str:
return os.path.join(IX_APPS_MOUNT_PATH, 'app_configs')
+def get_app_parent_volume_ds_name(docker_ds: str, app_name: str) -> str:
+ return os.path.join(docker_ds, 'app_mounts', app_name)
+
+
def get_app_parent_volume_path() -> str:
return os.path.join(IX_APPS_MOUNT_PATH, 'app_mounts')
diff --git a/src/middlewared/middlewared/plugins/apps/ix_apps/query.py b/src/middlewared/middlewared/plugins/apps/ix_apps/query.py
index 005a7ad16f53f..8d33c9cfc384e 100644
--- a/src/middlewared/middlewared/plugins/apps/ix_apps/query.py
+++ b/src/middlewared/middlewared/plugins/apps/ix_apps/query.py
@@ -74,11 +74,8 @@ def list_apps(
continue
workloads = translate_resources_to_desired_workflow(app_resources)
- # TODO: So when we stop an app, we remove all it's related resources and we wouldn't be in this for loop at all
- # however, when we stop docker service and start it again - the containers can be in exited state which means
- # we need to account for this.
- # This TODO however is for figuring out why app.start doesn't work with the compose actions we have in place
- # atm and should then we be maybe doing docker compose down on apps when stopping docker service
+    # When we stop the docker service and start it again, the containers can be in an
+    # exited state, which we need to account for.
state = 'STOPPED'
for container in workloads['container_details']:
if container['state'] == 'starting':
@@ -184,6 +181,7 @@ def translate_resources_to_desired_workflow(app_resources: dict) -> dict:
'port_config': container_ports_config,
'state': state,
'volume_mounts': [v.__dict__ for v in volume_mounts],
+ 'id': container['Id'],
})
workloads['used_ports'].extend(container_ports_config)
volumes.update(volume_mounts)
diff --git a/src/middlewared/middlewared/plugins/apps/logs.py b/src/middlewared/middlewared/plugins/apps/logs.py
new file mode 100644
index 0000000000000..939ab23a0e21a
--- /dev/null
+++ b/src/middlewared/middlewared/plugins/apps/logs.py
@@ -0,0 +1,89 @@
+import errno
+
+import docker.errors
+from dateutil.parser import parse, ParserError
+from docker.models.containers import Container
+
+from middlewared.event import EventSource
+from middlewared.schema import Dict, Int, Str
+from middlewared.service import CallError
+from middlewared.validators import Range
+
+from .ix_apps.docker.utils import get_docker_client
+
+
+class AppContainerLogsFollowTailEventSource(EventSource):
+
+ """
+ Retrieve logs of a container/service in an app.
+
+    The name of the app and the id of the container/service are required.
+    Optionally, `tail_lines` can be specified.
+
+    `tail_lines` selects how many lines of logs to retrieve for the specified container. It
+    defaults to 500. If set to `null`, the complete logs of the container are retrieved.
+ """
+ ACCEPTS = Dict(
+ Int('tail_lines', default=500, validators=[Range(min_=1)], null=True),
+ Str('app_name', required=True),
+ Str('container_id', required=True),
+ )
+ RETURNS = Dict(
+ Str('data', required=True),
+ Str('timestamp', required=True, null=True)
+ )
+
+ def __init__(self, *args, **kwargs):
+ super(AppContainerLogsFollowTailEventSource, self).__init__(*args, **kwargs)
+ self.logs_stream = None
+
+ def validate_log_args(self, app_name, container_id) -> Container:
+ app = self.middleware.call_sync('app.get_instance', app_name)
+ if app['state'] != 'RUNNING':
+ raise CallError(f'App "{app_name}" is not running')
+
+ if not any(c['id'] == container_id for c in app['active_workloads']['container_details']):
+ raise CallError(f'Container "{container_id}" not found in app "{app_name}"', errno=errno.ENOENT)
+
+ docker_client = get_docker_client()
+ try:
+ container = docker_client.containers.get(container_id)
+ except docker.errors.NotFound:
+ raise CallError(f'Container "{container_id}" not found')
+
+ return container
+
+ def run_sync(self):
+ app_name = self.arg['app_name']
+ container_id = self.arg['container_id']
+ tail_lines = self.arg['tail_lines'] or 'all'
+
+ container = self.validate_log_args(app_name, container_id)
+ self.logs_stream = container.logs(stream=True, follow=True, timestamps=True, tail=tail_lines)
+
+ for log_entry in map(bytes.decode, self.logs_stream):
+ # Event should contain a timestamp in RFC3339 format, we should parse it and supply it
+ # separately so UI can highlight the timestamp giving us a cleaner view of the logs
+ timestamp = log_entry.split(maxsplit=1)[0].strip()
+ try:
+ timestamp = str(parse(timestamp))
+ except (TypeError, ParserError):
+ timestamp = None
+ else:
+ log_entry = log_entry.split(maxsplit=1)[-1].lstrip()
+
+ self.send_event('ADDED', fields={'data': log_entry, 'timestamp': timestamp})
+
+ async def cancel(self):
+ await super().cancel()
+ if self.logs_stream:
+ await self.middleware.run_in_thread(self.logs_stream.close)
+
+ async def on_finish(self):
+ self.logs_stream = None
+
+
+def setup(middleware):
+ middleware.register_event_source(
+ 'app.container_log_follow', AppContainerLogsFollowTailEventSource, roles=['APPS_READ']
+ )
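
Each event emitted by this source carries the two RETURNS fields above; because docker is asked for `timestamps=True`, every raw line starts with an RFC3339 timestamp that gets split off, re-parsed, and reported separately. A small sketch of that per-line transformation (the log line is made up):

```python
# Sketch of the per-line handling in run_sync(): split off docker's RFC3339
# timestamp prefix and re-parse it, leaving the message text in 'data'.
from dateutil.parser import parse

raw = '2024-07-30T13:40:47.123456Z Server started\n'  # hypothetical docker log line
stamp, _, message = raw.partition(' ')
print({'data': message, 'timestamp': str(parse(stamp))})
# -> {'data': 'Server started\n', 'timestamp': '2024-07-30 13:40:47.123456+00:00'}
```
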
diff --git a/src/middlewared/middlewared/plugins/apps/resources.py b/src/middlewared/middlewared/plugins/apps/resources.py
index 7224033671e8b..5abd3d7328ac1 100644
--- a/src/middlewared/middlewared/plugins/apps/resources.py
+++ b/src/middlewared/middlewared/plugins/apps/resources.py
@@ -12,7 +12,7 @@ class Config:
namespace = 'app'
cli_namespace = 'app'
- @accepts()
+ @accepts(roles=['APPS_READ'])
@returns(List(items=[Ref('certificate_entry')]))
async def certificate_choices(self):
"""
@@ -23,7 +23,7 @@ async def certificate_choices(self):
{'select': ['name', 'id']}
)
- @accepts()
+ @accepts(roles=['APPS_READ'])
@returns(List(items=[Ref('certificateauthority_entry')]))
async def certificate_authority_choices(self):
"""
@@ -33,7 +33,7 @@ async def certificate_authority_choices(self):
'certificateauthority.query', [['revoked', '=', False], ['parsed', '=', True]], {'select': ['name', 'id']}
)
- @accepts()
+ @accepts(roles=['APPS_READ'])
@returns(List(items=[Int('used_port')]))
async def used_ports(self):
"""
@@ -46,7 +46,7 @@ async def used_ports(self):
for host_port in port_entry['host_ports']
})))
- @accepts()
+ @accepts(roles=['APPS_READ'])
@returns(Dict(Str('ip_choice')))
async def ip_choices(self):
"""
@@ -57,15 +57,15 @@ async def ip_choices(self):
for ip in await self.middleware.call('interface.ip_in_use', {'static': True, 'any': True})
}
- @accepts()
+ @accepts(roles=['APPS_READ'])
@returns(Dict('gpu_choices', additional_attrs=True))
async def gpu_choices(self):
"""
Returns GPU choices which can be used by applications.
"""
return {
- gpu['description']: {
- k: gpu[k] for k in ('vendor', 'description', 'vendor_specific_config')
+ gpu['pci_slot']: {
+ k: gpu[k] for k in ('vendor', 'description', 'vendor_specific_config', 'pci_slot')
}
for gpu in await self.gpu_choices_internal()
if not gpu['error']
diff --git a/src/middlewared/middlewared/plugins/apps/rollback.py b/src/middlewared/middlewared/plugins/apps/rollback.py
index 4dc0ae0ebcb54..4c09d914e54e2 100644
--- a/src/middlewared/middlewared/plugins/apps/rollback.py
+++ b/src/middlewared/middlewared/plugins/apps/rollback.py
@@ -20,7 +20,8 @@ class Config:
'options',
Str('app_version', empty=False, required=True),
Bool('rollback_snapshot', default=True),
- )
+ ),
+ roles=['APPS_WRITE'],
)
@returns(Ref('app_query'))
@job(lock=lambda args: f'app_rollback_{args[0]}')
@@ -45,7 +46,7 @@ def rollback(self, job, app_name, options):
'app.schema.normalize_and_validate_values', rollback_version, config, False,
get_installed_app_path(app_name), app,
)
- new_values = add_context_to_values(app_name, new_values, rollback=True)
+ new_values = add_context_to_values(app_name, new_values, rollback_version['app_metadata'], rollback=True)
update_app_config(app_name, options['app_version'], new_values)
job.set_progress(
@@ -86,7 +87,7 @@ def rollback(self, job, app_name, options):
return self.middleware.call_sync('app.get_instance', app_name)
- @accepts(Str('app_name'))
+ @accepts(Str('app_name'), roles=['APPS_READ'])
@returns(List('rollback_versions', items=[Str('version')]))
def rollback_versions(self, app_name):
"""
diff --git a/src/middlewared/middlewared/plugins/apps/schema_action_context.py b/src/middlewared/middlewared/plugins/apps/schema_action_context.py
index a79e13a6c4d38..1ec90117aea02 100644
--- a/src/middlewared/middlewared/plugins/apps/schema_action_context.py
+++ b/src/middlewared/middlewared/plugins/apps/schema_action_context.py
@@ -2,6 +2,7 @@
from middlewared.service import CallError, Service
+from .ix_apps.path import get_app_parent_volume_ds_name
from .utils import DATASET_DEFAULTS
@@ -12,7 +13,9 @@ class Config:
private = True
async def update_volumes(self, app_name, volumes):
- app_volume_ds = os.path.join((await self.middleware.call('docker.config'))['dataset'], 'app_mounts', app_name)
+ app_volume_ds = get_app_parent_volume_ds_name(
+ (await self.middleware.call('docker.config'))['dataset'], app_name
+ )
user_wants = {app_volume_ds: {'properties': {}}} | {os.path.join(app_volume_ds, v['name']): v for v in volumes}
existing_datasets = {
diff --git a/src/middlewared/middlewared/plugins/apps/schema_normalization.py b/src/middlewared/middlewared/plugins/apps/schema_normalization.py
index ee17fd8ff8f49..fa40f69552a56 100644
--- a/src/middlewared/middlewared/plugins/apps/schema_normalization.py
+++ b/src/middlewared/middlewared/plugins/apps/schema_normalization.py
@@ -28,7 +28,9 @@ def __init__(self, *args, **kwargs):
for method in REF_MAPPING.values():
assert isinstance(getattr(self, f'normalize_{method}'), Callable) is True
- async def normalize_and_validate_values(self, item_details, values, update, app_dir, app_data=None):
+ async def normalize_and_validate_values(
+ self, item_details, values, update, app_dir, app_data=None, perform_actions=True,
+ ):
dict_obj = await self.middleware.call(
'app.schema.validate_values', item_details, values, update, app_data,
)
@@ -39,7 +41,8 @@ async def normalize_and_validate_values(self, item_details, values, update, app_
},
'actions': [],
})
- await self.perform_actions(context)
+ if perform_actions:
+ await self.perform_actions(context)
return new_values
async def perform_actions(self, context):
diff --git a/src/middlewared/middlewared/plugins/apps/upgrade.py b/src/middlewared/middlewared/plugins/apps/upgrade.py
index c6e0db25801ef..2496198f241b1 100644
--- a/src/middlewared/middlewared/plugins/apps/upgrade.py
+++ b/src/middlewared/middlewared/plugins/apps/upgrade.py
@@ -22,7 +22,8 @@ class Config:
'options',
Dict('values', additional_attrs=True, private=True),
Str('app_version', empty=False, default='latest'),
- )
+ ),
+ roles=['APPS_WRITE'],
)
@returns(Ref('app_query'))
@job(lock=lambda args: f'app_upgrade_{args[0]}')
@@ -49,15 +50,19 @@ def upgrade(self, job, app_name, options):
# 5) Docker should be notified to recreate resources and to let upgrade to commence
# 6) Update collective metadata config to reflect new version
# 7) Finally create ix-volumes snapshot for rollback
- with upgrade_config(app_name, upgrade_version) as version_path:
+ with upgrade_config(app_name, upgrade_version):
config = get_current_app_config(app_name, app['version'])
config.update(options['values'])
- app_version_details = self.middleware.call_sync('catalog.app_version_details', version_path)
new_values = self.middleware.call_sync(
- 'app.schema.normalize_and_validate_values', app_version_details, config, False,
+ 'app.schema.normalize_and_validate_values', upgrade_version, config, False,
get_installed_app_path(app_name), app,
)
- new_values = add_context_to_values(app_name, new_values, upgrade=True, upgrade_metadata={})
+ new_values = add_context_to_values(
+ app_name, new_values, upgrade_version['app_metadata'], upgrade=True, upgrade_metadata={
+ 'old_version_metadata': app['metadata'],
+ 'new_version_metadata': upgrade_version['app_metadata'],
+ }
+ )
update_app_config(app_name, upgrade_version['version'], new_values)
job.set_progress(40, f'Configuration updated for {app_name!r}, upgrading app')
@@ -87,7 +92,8 @@ def upgrade(self, job, app_name, options):
Dict(
'options',
Str('app_version', empty=False, default='latest'),
- )
+ ),
+ roles=['APPS_READ'],
)
@returns(Dict(
Str('latest_version', description='Latest version available for the app'),
diff --git a/src/middlewared/middlewared/plugins/catalog/app_version.py b/src/middlewared/middlewared/plugins/catalog/app_version.py
index 0d203d0420e62..768a137443703 100644
--- a/src/middlewared/middlewared/plugins/catalog/app_version.py
+++ b/src/middlewared/middlewared/plugins/catalog/app_version.py
@@ -21,6 +21,7 @@ class Config:
'app_version_details',
Str('train', required=True),
),
+ roles=['CATALOG_READ'],
)
@returns(Dict(
# TODO: Make sure keys here are mapped appropriately
diff --git a/src/middlewared/middlewared/plugins/catalog/apps.py b/src/middlewared/middlewared/plugins/catalog/apps.py
index 0745c756811ba..3b3e8f875991b 100644
--- a/src/middlewared/middlewared/plugins/catalog/apps.py
+++ b/src/middlewared/middlewared/plugins/catalog/apps.py
@@ -8,7 +8,7 @@ class AppService(Service):
class Config:
cli_namespace = 'app'
- @filterable()
+ @filterable(roles=['CATALOG_READ'])
@filterable_returns(Ref('available_apps'))
async def latest(self, filters, options):
"""
@@ -22,8 +22,7 @@ async def latest(self, filters, options):
), filters, options
)
- # TODO: Roles are missing
- @filterable()
+ @filterable(roles=['CATALOG_READ'])
@filterable_returns(Dict(
'available_apps',
Bool('healthy', required=True),
@@ -78,7 +77,7 @@ def available(self, filters, options):
return filter_list(results, filters, options)
- @accepts()
+ @accepts(roles=['CATALOG_READ'])
@returns(List(items=[Str('category')]))
async def categories(self):
"""
@@ -86,7 +85,7 @@ async def categories(self):
"""
return sorted(list(await self.middleware.call('catalog.retrieve_mapped_categories')))
- @accepts(Str('app_name'), Str('train'))
+ @accepts(Str('app_name'), Str('train'), roles=['CATALOG_READ'])
@returns(List(items=[Ref('available_apps')]))
def similar(self, app_name, train):
"""
diff --git a/src/middlewared/middlewared/plugins/catalog/apps_details.py b/src/middlewared/middlewared/plugins/catalog/apps_details.py
index b0e4ced9cb8c9..d624130bfb284 100644
--- a/src/middlewared/middlewared/plugins/catalog/apps_details.py
+++ b/src/middlewared/middlewared/plugins/catalog/apps_details.py
@@ -53,33 +53,37 @@ def cached(self, label):
Bool('retrieve_all_trains', default=True),
List('trains', items=[Str('train_name')]),
),
+ roles=['CATALOG_READ']
)
@returns(Dict(
'trains',
additional_attrs=True,
example={
- 'charts': {
- 'chia': {
- 'name': 'chia',
- 'categories': ['storage', 'crypto'],
- 'app_readme': 'app readme here',
- 'location': '/mnt/evo/ix-applications/catalogs/github_com_truenas_charts_git_master/charts/chia',
+ 'stable': {
+ 'plex': {
+                'app_readme': '<h1>Plex</h1>',
+ 'categories': ['media'],
+ 'description': 'Plex is a media server that allows you to stream your media to any Plex client.',
'healthy': True,
'healthy_error': None,
- 'latest_version': '1.2.0',
- 'latest_app_version': '1.1.6',
- 'last_update': '2023-02-01 22:55:31',
- 'icon_url': 'https://www.chia.net/img/chia_logo.svg',
+ 'home': 'https://plex.tv',
+ 'location': '/mnt/.ix-apps/truenas_catalog/stable/plex',
+ 'latest_version': '1.0.0',
+ 'latest_app_version': '1.40.2.8395',
+ 'latest_human_version': '1.40.2.8395_1.0.0',
+ 'last_update': '2024-07-30 13:40:47+00:00',
+ 'name': 'plex',
'recommended': False,
- 'title': 'Chia',
- 'description': 'App description here',
- 'maintainers': [],
- 'tags': ['finance', 'crypto', 'blockchain'],
- 'home': 'https://www.chia.net/',
- 'screenshots': [],
- 'sources': [],
- }
- }
+ 'title': 'Plex',
+ 'maintainers': [
+ {'email': 'dev@ixsystems.com', 'name': 'truenas', 'url': 'https://www.truenas.com/'},
+ ],
+ 'tags': ['plex', 'media', 'entertainment', 'movies', 'series', 'tv', 'streaming'],
+ 'screenshots': ['https://media.sys.truenas.net/apps/plex/screenshots/screenshot2.png'],
+ 'sources': ['https://plex.tv', 'https://hub.docker.com/r/plexinc/pms-docker'],
+ 'icon_url': 'https://media.sys.truenas.net/apps/plex/icons/icon.png'
+ },
+ },
}
))
def apps(self, options):
@@ -225,7 +229,7 @@ async def get_normalized_questions_context(self):
return {
'timezones': await self.middleware.call('system.general.timezone_choices'),
'system.general.config': await self.middleware.call('system.general.config'),
- 'unused_ports': await self.middleware.call('port.get_unused_ports'),
+ 'unused_ports': await self.middleware.call('port.get_unused_ports', 1),
'certificates': await self.middleware.call('app.certificate_choices'),
'certificate_authorities': await self.middleware.call('app.certificate_authority_choices'),
'ip_choices': await self.middleware.call('app.ip_choices'),
diff --git a/src/middlewared/middlewared/plugins/catalog/sync.py b/src/middlewared/middlewared/plugins/catalog/sync.py
index 5eb0ca8710489..7905959ac2614 100644
--- a/src/middlewared/middlewared/plugins/catalog/sync.py
+++ b/src/middlewared/middlewared/plugins/catalog/sync.py
@@ -15,7 +15,7 @@ class CatalogService(Service):
async def synced(self):
return self.SYNCED
- @accepts()
+ @accepts(roles=['CATALOG_WRITE'])
@returns()
@job(lock='official_catalog_sync')
async def sync(self, job):
diff --git a/src/middlewared/middlewared/plugins/catalog/update.py b/src/middlewared/middlewared/plugins/catalog/update.py
index 427ef638845cd..89e2094f35b44 100644
--- a/src/middlewared/middlewared/plugins/catalog/update.py
+++ b/src/middlewared/middlewared/plugins/catalog/update.py
@@ -27,6 +27,7 @@ class Config:
datastore_primary_key_type = 'string'
cli_namespace = 'app.catalog'
namespace = 'catalog'
+ role_prefix = 'CATALOG'
ENTRY = Dict(
'catalog_create',
diff --git a/src/middlewared/middlewared/plugins/cloud_sync.py b/src/middlewared/middlewared/plugins/cloud_sync.py
index 37345d7ed682f..acb98edd9d597 100644
--- a/src/middlewared/middlewared/plugins/cloud_sync.py
+++ b/src/middlewared/middlewared/plugins/cloud_sync.py
@@ -105,8 +105,12 @@ async def __aenter__(self):
if self.cloud_sync.get("path"):
if os.path.dirname(self.cloud_sync.get("path").rstrip("/")) == "/mnt":
- rclone_filter.append("- /ix-applications")
- rclone_filter.append("- /ix-applications/**")
+ rclone_filter.extend([
+ "- /ix-applications",
+ "- /ix-apps",
+ "- /ix-applications/**",
+ "- /ix-apps/**",
+ ])
for item in self.cloud_sync.get("exclude") or []:
rclone_filter.append(f"- {item}")
@@ -547,6 +551,10 @@ async def delete(self, alerts, query):
alerts
))
+ async def load(self, alerts):
+ task_ids = {str(task["id"]) for task in await self.middleware.call("cloudsync.query")}
+ return [alert for alert in alerts if alert.key in task_ids]
+
def lsjson_error_excerpt(error):
excerpt = error.split("\n")[0]
diff --git a/src/middlewared/middlewared/plugins/directoryservices_/health.py b/src/middlewared/middlewared/plugins/directoryservices_/health.py
index 2678dd66610bc..ac3855332843b 100644
--- a/src/middlewared/middlewared/plugins/directoryservices_/health.py
+++ b/src/middlewared/middlewared/plugins/directoryservices_/health.py
@@ -96,6 +96,14 @@ def check(self) -> bool:
if initial_status in (DSStatus.LEAVING, DSStatus.JOINING):
self.logger.debug("Deferring health check due to status of %s", initial_status.name)
return True
+ elif initial_status is None:
+ # Our directory service hasn't been initialized.
+ #
+ # We'll be optimistic and call it HEALTHY before we run the
+            # actual health checks below. The reason for this is so that
+            # if we attempt to etc.generate files during the health check, a
+ # second call to directoryservices.status won't land us here again.
+ DSHealthObj.update(enabled_ds, DSStatus.HEALTHY, None)
try:
match enabled_ds:
diff --git a/src/middlewared/middlewared/plugins/docker/update.py b/src/middlewared/middlewared/plugins/docker/update.py
index 6747ac5f3d3dd..a1bc882e372bd 100644
--- a/src/middlewared/middlewared/plugins/docker/update.py
+++ b/src/middlewared/middlewared/plugins/docker/update.py
@@ -20,6 +20,7 @@ class Config:
datastore = 'services.docker'
datastore_extend = 'docker.config_extend'
cli_namespace = 'app.docker'
+ role_prefix = 'DOCKER'
ENTRY = Dict(
'docker_entry',
@@ -53,19 +54,19 @@ async def do_update(self, job, data):
config.update(data)
if old_config != config:
- if not config['pool']:
- try:
- await self.middleware.call('service.stop', 'docker')
- except Exception as e:
- raise CallError(f'Failed to stop docker service: {e}')
- await self.middleware.call('docker.state.set_status', Status.UNCONFIGURED.value)
+ try:
+ await self.middleware.call('service.stop', 'docker')
+ except Exception as e:
+ raise CallError(f'Failed to stop docker service: {e}')
+
+ await self.middleware.call('docker.state.set_status', Status.UNCONFIGURED.value)
await self.middleware.call('datastore.update', self._config.datastore, old_config['id'], config)
await self.middleware.call('docker.setup.status_change')
return await self.config()
- @accepts()
+ @accepts(roles=['DOCKER_READ'])
@returns(Dict(
Str('status', enum=[e.value for e in Status]),
Str('description'),
diff --git a/src/middlewared/middlewared/plugins/filesystem.py b/src/middlewared/middlewared/plugins/filesystem.py
index bb29ceec741fb..dfde7b4c57a84 100644
--- a/src/middlewared/middlewared/plugins/filesystem.py
+++ b/src/middlewared/middlewared/plugins/filesystem.py
@@ -12,6 +12,7 @@
from itertools import product
from middlewared.event import EventSource
from middlewared.plugins.pwenc import PWENC_FILE_SECRET, PWENC_FILE_SECRET_MODE
+from middlewared.plugins.docker.state_utils import IX_APPS_DIR_NAME
from middlewared.plugins.filesystem_ import chflags
from middlewared.schema import accepts, Bool, Dict, Float, Int, List, Ref, returns, Path, Str, UnixPerm
from middlewared.service import private, CallError, filterable_returns, filterable, Service, job
@@ -273,10 +274,6 @@ def listdir(self, path, filters, options):
if not path.is_dir():
raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)
- # TODO: once new apps implementation is in-place remove this check
- if 'ix-applications' in path.parts:
- raise CallError('Ix-applications is a system managed dataset and its contents cannot be listed')
-
file_type = None
for filter_ in filters:
if filter_[0] not in ['type']:
@@ -301,7 +298,7 @@ def listdir(self, path, filters, options):
# prevent shares from being configured to point to
# a path that doesn't exist on a zpool, we'll
# filter these here.
- filters.append(['is_mountpoint', '=', True])
+ filters.extend([['is_mountpoint', '=', True], ['name', '!=', IX_APPS_DIR_NAME]])
with DirectoryIterator(path, file_type=file_type) as d_iter:
return filter_list(d_iter, filters, options)
diff --git a/src/middlewared/middlewared/plugins/filesystem_/acl_template.py b/src/middlewared/middlewared/plugins/filesystem_/acl_template.py
index ca08b47554b0e..75061e1a55d41 100644
--- a/src/middlewared/middlewared/plugins/filesystem_/acl_template.py
+++ b/src/middlewared/middlewared/plugins/filesystem_/acl_template.py
@@ -206,7 +206,7 @@ async def append_builtins(self, data):
if 'ACTIVE_DIRECTORY' not in domain_info['domain_flags']['parsed']:
self.logger.warning(
'%s: domain is not identified properly as an Active Directory domain.',
- domain_info['alt_name']
+ domain_info['dns_name']
)
return
diff --git a/src/middlewared/middlewared/plugins/iscsi_/alua.py b/src/middlewared/middlewared/plugins/iscsi_/alua.py
index e6bd2a2399e76..ca98d7f8dd519 100644
--- a/src/middlewared/middlewared/plugins/iscsi_/alua.py
+++ b/src/middlewared/middlewared/plugins/iscsi_/alua.py
@@ -225,10 +225,12 @@ async def standby_after_start(self, job):
remote_requires_reload = False
# We are the STANDBY node. Tell the ACTIVE it can logout any HA targets it had left over.
+ prefix = await self.middleware.call('iscsi.target.ha_iqn_prefix')
while self.standby_starting:
try:
- iqns = await self.middleware.call('failover.call_remote', 'iscsi.target.logged_in_iqns')
- if not iqns:
+ iqns = (await self.middleware.call('failover.call_remote', 'iscsi.target.logged_in_iqns')).keys()
+ ha_iqns = list(filter(lambda iqn: iqn.startswith(prefix), iqns))
+ if not ha_iqns:
break
await self.middleware.call('failover.call_remote', 'iscsi.target.logout_ha_targets')
# If we have logged out targets on the ACTIVE node, then we will want to regenerate
diff --git a/src/middlewared/middlewared/plugins/iscsi_/targets.py b/src/middlewared/middlewared/plugins/iscsi_/targets.py
index 3bc5a0a4dd524..c6b82bea82514 100644
--- a/src/middlewared/middlewared/plugins/iscsi_/targets.py
+++ b/src/middlewared/middlewared/plugins/iscsi_/targets.py
@@ -595,15 +595,15 @@ async def logout_ha_targets(self, no_wait=False, raise_error=False):
When called on a HA BACKUP node will attempt to login to all internal HA targets,
used in ALUA.
"""
- iqns = await self.middleware.call('iscsi.target.active_ha_iqns')
+ ha_iqn_prefix_str = await self.middleware.call('iscsi.target.ha_iqn_prefix')
# Check what's already logged in
existing = await self.middleware.call('iscsi.target.logged_in_iqns')
# Generate the set of things we want to logout (don't assume every IQN, just the HA ones)
todo = set()
- for iqn in iqns.values():
- if iqn in existing:
+ for iqn in existing.keys():
+ if iqn.startswith(ha_iqn_prefix_str):
todo.add(iqn)
if todo:
@@ -788,11 +788,16 @@ def set_ha_targets_sys(self, iqn_prefix, param, text):
if targetname.read_text().startswith(iqn_prefix):
targetname.parent.joinpath(param).write_text(text)
+ @private
+ async def ha_iqn_prefix(self):
+ global_basename = (await self.middleware.call('iscsi.global.config'))['basename']
+ return f'{global_basename}:HA:'
+
@private
async def ha_iqn(self, name):
"""Return the IQN of the specified internal target."""
- global_basename = (await self.middleware.call('iscsi.global.config'))['basename']
- return f'{global_basename}:HA:{name}'
+ prefix = await self.middleware.call('iscsi.target.ha_iqn_prefix')
+ return f'{prefix}{name}'
@private
def iqn_ha_luns(self, iqn):
diff --git a/src/middlewared/middlewared/plugins/jbof/crud.py b/src/middlewared/middlewared/plugins/jbof/crud.py
index 598cb2d43d4b5..1bb6ddeb863a7 100644
--- a/src/middlewared/middlewared/plugins/jbof/crud.py
+++ b/src/middlewared/middlewared/plugins/jbof/crud.py
@@ -938,5 +938,13 @@ async def configure_job(self, job, reload_fenced=False):
return {'failed': failed, 'message': err}
+async def _clear_reboot_alerts(middleware, event_type, args):
+ await middleware.call('alert.oneshot_delete', 'JBOFTearDownFailure', None)
+
+
async def setup(middleware):
RedfishClient.setup()
+ # Deliberately do NOT handle the case where the system is already
+ # ready, as we only want the following to occur after a boot, not
+    # on a middlewared restart.
+ middleware.event_subscribe("system.ready", _clear_reboot_alerts)
diff --git a/src/middlewared/middlewared/plugins/kubernetes_to_docker/__init__.py b/src/middlewared/middlewared/plugins/kubernetes_to_docker/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_k8s_backups.py b/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_k8s_backups.py
new file mode 100644
index 0000000000000..4ecc191653cea
--- /dev/null
+++ b/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_k8s_backups.py
@@ -0,0 +1,88 @@
+import os
+
+from middlewared.schema import accepts, Dict, returns, Str
+from middlewared.service import job, Service
+
+from .list_utils import get_backup_dir, get_default_release_details, K8s_BACKUP_NAME_PREFIX, release_details
+from .utils import get_k8s_ds
+
+
+class K8stoDockerMigrationService(Service):
+
+ class Config:
+ namespace = 'k8s_to_docker'
+ cli_namespace = 'k8s_to_docker'
+
+ @accepts(Str('kubernetes_pool'), roles=['DOCKER_READ'])
+ @returns(Dict(
+ 'backups',
+ Str('error', null=True),
+ Dict('backups', additional_attrs=True),
+ ))
+ @job(lock=lambda args: f'k8s_to_docker_list_backups_{args[0]}')
+ def list_backups(self, job, kubernetes_pool):
+ """
+ List existing kubernetes backups
+ """
+ backup_config = {
+ 'error': None,
+ 'backups': {},
+ }
+ k8s_ds = get_k8s_ds(kubernetes_pool)
+ if not self.middleware.call_sync('pool.dataset.query', [['id', '=', k8s_ds]]):
+ return backup_config | {'error': f'Unable to locate {k8s_ds!r} dataset'}
+
+ backup_base_dir = get_backup_dir(k8s_ds)
+ if not os.path.exists(backup_base_dir):
+ return backup_config | {'error': f'Unable to locate {backup_base_dir!r} backups directory'}
+
+ self.middleware.call_sync('catalog.sync').wait_sync()
+
+ backups = backup_config['backups']
+ snapshots = self.middleware.call_sync(
+ 'zfs.snapshot.query', [['name', '^', f'{k8s_ds}@{K8s_BACKUP_NAME_PREFIX}']], {'select': ['name']}
+ )
+ releases_datasets = set(
+ ds['id'].split('/', 3)[-1].split('/', 1)[0]
+ for ds in self.middleware.call_sync('zfs.dataset.get_instance', f'{k8s_ds}/releases')['children']
+ )
+ apps_mapping = self.middleware.call_sync('catalog.train_to_apps_version_mapping')
+ catalog_path = self.middleware.call_sync('catalog.config')['location']
+
+ for snapshot in snapshots:
+ backup_name = snapshot['name'].split('@', 1)[-1].split(K8s_BACKUP_NAME_PREFIX, 1)[-1]
+ backup_path = os.path.join(backup_base_dir, backup_name)
+ if not os.path.exists(backup_path):
+ continue
+
+ backup_data = {
+ 'name': backup_name,
+ 'releases': [],
+ 'skipped_releases': [],
+ 'snapshot_name': snapshot['name'],
+ 'created_on': self.middleware.call_sync(
+ 'zfs.snapshot.get_instance', snapshot['name']
+ )['properties']['creation']['parsed'],
+ 'backup_path': backup_path,
+ }
+
+ with os.scandir(backup_path) as entries:
+ for release in entries:
+ if release.name not in releases_datasets:
+ backup_data['skipped_releases'].append(get_default_release_details(release.name) | {
+ 'error': 'Release dataset not found in releases dataset',
+ })
+ continue
+
+ config = release_details(
+ release.name, release.path, catalog_path, apps_mapping
+ )
+ if config['error']:
+ backup_data['skipped_releases'].append(config)
+ else:
+ backup_data['releases'].append(config)
+
+ backups[backup_name] = backup_data
+
+ job.set_progress(100, 'Retrieved backup config')
+ return backup_config
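
For orientation, the job result is a plain dict; a hypothetical successful listing (every value below is invented for illustration, only the keys come from `get_default_release_details` and the code above) looks roughly like:

```python
# Hypothetical shape of k8s_to_docker.list_backups output; values are invented,
# keys are taken from this diff.
{
    'error': None,
    'backups': {
        'mybackup': {
            'name': 'mybackup',
            'snapshot_name': 'tank/ix-applications@ix-applications-backup-mybackup',
            'created_on': '2024-07-30 13:40:47',  # parsed zfs 'creation' property
            'backup_path': '/mnt/tank/ix-applications/backups/mybackup',
            'releases': [
                {
                    'error': None,
                    'release_name': 'plex',
                    'train': 'stable',
                    'app_name': 'plex',
                    'app_version': '1.0.0',
                    'migrate_file_path': '.../stable/plex/1.0.0/migrations/migrate_from_kubernetes',
                    'helm_secret': {...},      # decoded helm release secret
                    'release_secrets': {...},  # other k8s secrets of the release
                },
            ],
            'skipped_releases': [],
        },
    },
}
```
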
diff --git a/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_utils.py b/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_utils.py
new file mode 100644
index 0000000000000..5b9e31adc15ef
--- /dev/null
+++ b/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_utils.py
@@ -0,0 +1,85 @@
+import os
+
+import yaml
+
+from catalog_reader.train_utils import get_train_path
+from middlewared.plugins.docker.state_utils import catalog_ds_path
+
+from .secrets_utils import list_secrets
+from .yaml import SerializedDatesFullLoader
+
+
+HELM_SECRET_PREFIX = 'sh.helm.release'
+K8s_BACKUP_NAME_PREFIX = 'ix-applications-backup-'
+
+
+def get_backup_dir(k8s_ds: str) -> str:
+ return os.path.join('/mnt', k8s_ds, 'backups')
+
+
+def get_release_metadata(release_path: str) -> dict:
+ try:
+ with open(os.path.join(release_path, 'namespace.yaml')) as f:
+ return yaml.load(f.read(), Loader=SerializedDatesFullLoader)
+ except (FileNotFoundError, yaml.YAMLError):
+ return {}
+
+
+def get_default_release_details(release_name: str) -> dict:
+ return {
+ 'error': None,
+ 'helm_secret': {},
+ 'release_secrets': {},
+ 'train': None,
+ 'app_name': None,
+ 'app_version': None,
+ 'release_name': release_name,
+ 'migrate_file_path': None,
+ }
+
+
+def release_details(release_name: str, release_path: str, catalog_path: str, apps_mapping: dict) -> dict:
+ config = get_default_release_details(release_name)
+ if not (release_metadata := get_release_metadata(release_path)) or not all(
+ k in release_metadata.get('metadata', {}).get('labels', {})
+ for k in ('catalog', 'catalog_branch', 'catalog_train')
+ ):
+ return config | {'error': 'Unable to read release metadata'}
+
+ metadata_labels = release_metadata['metadata']['labels']
+ if metadata_labels['catalog'] != 'TRUENAS' or metadata_labels['catalog_branch'] != 'master':
+ return config | {'error': 'Release is not from TrueNAS catalog'}
+
+ release_train = metadata_labels['catalog_train'] if metadata_labels['catalog_train'] != 'charts' else 'stable'
+ config['train'] = release_train
+ if release_train not in apps_mapping:
+ return config | {'error': 'Unable to locate release\'s train'}
+
+ secrets_dir = os.path.join(release_path, 'secrets')
+ try:
+ secrets = list_secrets(secrets_dir)
+ except FileNotFoundError:
+ return config | {'error': 'Unable to list release secrets'}
+
+ if secrets['helm_secret']['name'] is None:
+ return config | {'error': 'Unable to read helm secret details'}
+
+ config.update({
+ 'app_name': secrets['helm_secret']['name'],
+ **secrets,
+ })
+
+ if config['app_name'] not in apps_mapping[release_train]:
+ return config | {'error': 'Unable to locate release\'s app'}
+
+ config['app_version'] = apps_mapping[release_train][config['app_name']]['version']
+ migrate_tail_file_path = os.path.join(
+ release_train, config['app_name'], config['app_version'], 'migrations/migrate_from_kubernetes'
+ )
+ to_test_migrate_file_path = os.path.join(get_train_path(catalog_path), migrate_tail_file_path)
+ if os.path.exists(to_test_migrate_file_path):
+ config['migrate_file_path'] = os.path.join(get_train_path(catalog_ds_path()), migrate_tail_file_path)
+ else:
+ config['error'] = 'Unable to locate release\'s app\'s migration file'
+
+ return config
diff --git a/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate.py b/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate.py
new file mode 100644
index 0000000000000..348df85cf9775
--- /dev/null
+++ b/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate.py
@@ -0,0 +1,191 @@
+import contextlib
+import os.path
+import shutil
+
+from middlewared.plugins.apps.ix_apps.path import get_app_parent_volume_ds_name, get_installed_app_path
+from middlewared.plugins.docker.state_utils import DATASET_DEFAULTS
+from middlewared.plugins.docker.utils import applications_ds_name
+from middlewared.schema import accepts, Dict, List, returns, Str
+from middlewared.service import CallError, InstanceNotFound, job, Service
+
+from .migrate_config_utils import migrate_chart_release_config
+
+
+class K8stoDockerMigrationService(Service):
+
+ class Config:
+ namespace = 'k8s_to_docker'
+ cli_namespace = 'k8s_to_docker'
+
+ @accepts(
+ Str('kubernetes_pool'),
+ Dict(
+ 'options',
+ Str('backup_name', required=True, empty=False),
+ ),
+ roles=['DOCKER_WRITE']
+ )
+ @returns(List(
+ 'app_migration_details',
+ items=[Dict(
+ 'app_migration_detail',
+ Str('name'),
+ Str('error', null=True),
+ )]
+ ))
+ @job(lock='k8s_to_docker_migrate')
+ def migrate(self, job, kubernetes_pool, options):
+ """
+ Migrate kubernetes backups to docker.
+ """
+ # The workflow for the migration would be
+ # 1) Ensuring the specified backup exists
+        # 2) Map apps which are currently supported and will actually reflect in the UI
+ # 3) Setup filesystem appropriately for docker
+ # 4) Migrate the config of apps
+ # 5) Create relevant filesystem bits for apps and handle cases like ix-volumes
+ # 6) Redeploy apps
+ backup_config_job = self.middleware.call_sync('k8s_to_docker.list_backups', kubernetes_pool)
+ backup_config_job.wait_sync()
+ if backup_config_job.error:
+ raise CallError(f'Failed to list backups: {backup_config_job.error}')
+
+ backups = backup_config_job.result
+ if backups['error']:
+ raise CallError(f'Failed to list backups for {kubernetes_pool!r}: {backups["error"]}')
+
+ if options['backup_name'] not in backups['backups']:
+ raise CallError(f'Backup {options["backup_name"]} not found')
+
+ backup_config = backups['backups'][options['backup_name']]
+ job.set_progress(10, f'Located {options["backup_name"]} backup')
+
+ if not backup_config['releases']:
+ raise CallError(f'No old apps found in {options["backup_name"]!r} backup which can be migrated')
+
+ # We will see if docker dataset exists on this pool and if it is there, we will error out
+ docker_ds = applications_ds_name(kubernetes_pool)
+ with contextlib.suppress(InstanceNotFound):
+ self.middleware.call_sync('pool.dataset.get_instance_quick', docker_ds)
+ raise CallError(f'Docker dataset {docker_ds!r} already exists on {kubernetes_pool!r}')
+
+        # For good measure we stop the docker service and unset the docker pool if one is configured
+ self.middleware.call_sync('service.stop', 'docker')
+ job.set_progress(15, 'Un-configuring docker service if configured')
+ docker_job = self.middleware.call_sync('docker.update', {'pool': None})
+ docker_job.wait_sync()
+ if docker_job.error:
+ raise CallError(f'Failed to un-configure docker: {docker_job.error}')
+
+ # We will now configure docker service
+ docker_job = self.middleware.call_sync('docker.update', {'pool': kubernetes_pool})
+ docker_job.wait_sync()
+ if docker_job.error:
+ raise CallError(f'Failed to configure docker: {docker_job.error}')
+
+ self.middleware.call_sync('catalog.sync').wait_sync()
+
+ job.set_progress(25, f'Rolling back to {backup_config["snapshot_name"]!r} snapshot')
+ self.middleware.call_sync(
+ 'zfs.snapshot.rollback', backup_config['snapshot_name'], {
+ 'force': True,
+ 'recursive': True,
+ 'recursive_clones': True,
+ 'recursive_rollback': True,
+ }
+ )
+ job.set_progress(30, 'Starting migrating old apps to new apps')
+
+        # We will now iterate over each chart release which can be migrated and try to migrate its config.
+        # If we are able to migrate its config, we will proceed with setting up relevant filesystem bits
+        # for the app and finally redeploy it.
+ total_releases = len(backup_config['releases'])
+ app_percentage = ((70 - 30) / total_releases)
+ percentage = 30
+ release_details = []
+ migrate_context = {'gpu_choices': self.middleware.call_sync('app.gpu_choices_internal')}
+ dummy_job = type('dummy_job', (object,), {'set_progress': lambda *args: None})()
+ for chart_release in backup_config['releases']:
+ percentage += app_percentage
+ job.set_progress(percentage, f'Migrating {chart_release["release_name"]!r} app')
+
+ release_config = {
+ 'name': chart_release['release_name'],
+ 'error': 'Unable to complete migration',
+ }
+ release_details.append(release_config)
+ new_config = migrate_chart_release_config(chart_release | migrate_context)
+ if isinstance(new_config, str) or not new_config:
+ release_config['error'] = f'Failed to migrate config: {new_config}'
+ continue
+
+ complete_app_details = self.middleware.call_sync('catalog.get_app_details', chart_release['app_name'], {
+ 'train': chart_release['train'],
+ })
+
+ try:
+ self.middleware.call_sync(
+ 'app.create_internal', dummy_job, chart_release['release_name'],
+ chart_release['app_version'], new_config, complete_app_details, True,
+ )
+ except Exception as e:
+ release_config['error'] = f'Failed to create app: {e}'
+ continue
+
+            # At this point we have not yet instructed docker to start the app, and ix-volumes normalization is still left to do
+ release_user_config = chart_release['helm_secret']['config']
+ snapshot = backup_config['snapshot_name'].split('@')[-1]
+ available_snapshots = set()
+ for ix_volume in release_user_config.get('ixVolumes', []):
+ ds_name = ix_volume.get('hostPath', '')[5:] # remove /mnt/
+ ds_snap = f'{ds_name}@{snapshot}'
+ if not self.middleware.call_sync('zfs.snapshot.query', [['id', '=', ds_snap]]):
+ continue
+
+ available_snapshots.add(ds_snap)
+
+ if available_snapshots:
+ self.middleware.call_sync('app.schema.action.update_volumes', chart_release['release_name'], [])
+
+ try:
+ app_volume_ds = get_app_parent_volume_ds_name(
+ os.path.join(kubernetes_pool, 'ix-apps'), chart_release['release_name']
+ )
+ for snapshot in available_snapshots:
+ # We will do a zfs clone and promote here
+ destination_ds = os.path.join(app_volume_ds, snapshot.split('@')[0].split('/')[-1])
+ self.middleware.call_sync('zfs.snapshot.clone', {
+ 'snapshot': snapshot,
+ 'dataset_dst': destination_ds,
+ 'dataset_properties': {
+ k: v for k, v in DATASET_DEFAULTS.items() if k not in ['casesensitivity']
+ },
+ })
+ self.middleware.call_sync('zfs.dataset.promote', destination_ds)
+ self.middleware.call_sync('zfs.dataset.mount', destination_ds)
+ except CallError as e:
+ release_config['error'] = f'Failed to clone and promote ix-volumes: {e}'
+ # We do this to make sure it does not show up as installed in the UI
+ shutil.rmtree(get_installed_app_path(chart_release['release_name']), ignore_errors=True)
+ else:
+ release_config['error'] = None
+ self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True)
+
+ job.set_progress(75, 'Deploying migrated apps')
+
+ bulk_job = self.middleware.call_sync(
+ 'core.bulk', 'app.redeploy', [
+ [r['name']] for r in filter(lambda r: r['error'] is None, release_details)
+ ]
+ )
+ bulk_job.wait_sync()
+ if bulk_job.error:
+ raise CallError(f'Failed to redeploy apps: {bulk_job.error}')
+
+ for index, status in enumerate(bulk_job.result):
+ if status['error']:
+ release_details[index]['error'] = f'Failed to deploy app: {status["error"]}'
+
+ job.set_progress(100, 'Migration completed')
+
+ return release_details
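The progress bookkeeping in the loop above spreads the 30-70% band evenly across the releases being migrated. A tiny worked illustration, using four hypothetical release names:

    total_releases = 4
    app_percentage = (70 - 30) / total_releases   # 10.0 percentage points per app
    percentage = 30
    for name in ('plex', 'minio', 'syncthing', 'nextcloud'):
        percentage += app_percentage
        print(f'{percentage:.0f}% - migrating {name}')   # 40%, 50%, 60%, 70%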
diff --git a/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate_config_utils.py b/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate_config_utils.py
new file mode 100644
index 0000000000000..0a8008552921c
--- /dev/null
+++ b/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate_config_utils.py
@@ -0,0 +1,30 @@
+import tempfile
+
+import yaml
+
+from middlewared.plugins.apps.utils import run
+
+
+def migrate_chart_release_config(release_data: dict) -> dict | str:
+ with tempfile.NamedTemporaryFile(mode='w') as f:
+ f.write(yaml.dump(release_data))
+ f.flush()
+
+ cp = run([release_data['migrate_file_path'], f.name])
+ if cp.returncode:
+ return f'Failed to migrate config: {cp.stderr}'
+
+ if not cp.stdout:
+ error = 'No output from migration script'
+ else:
+ try:
+ new_config = yaml.safe_load(cp.stdout)
+ except yaml.YAMLError:
+ error = 'Failed to parse migrated config'
+ else:
+ if new_config:
+ return new_config
+ else:
+ error = 'No migrated config found'
+
+ return f'Failed to migrate config: {error}'
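A minimal usage sketch of migrate_chart_release_config; the release_data fields and the migrate script path below are made up for illustration, and the isinstance check mirrors how the migration loop earlier in this diff distinguishes an error string from a migrated config dict:

    release_data = {
        'release_name': 'demo-app',                # hypothetical release
        'migrate_file_path': '/tmp/migrate.py',    # assumed script location, illustrative only
        'config': {'timezone': 'Etc/UTC'},
    }
    new_config = migrate_chart_release_config(release_data)
    if isinstance(new_config, str) or not new_config:
        print(f'cannot migrate: {new_config}')
    else:
        print(f'migrated config keys: {sorted(new_config)}')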
diff --git a/src/middlewared/middlewared/plugins/kubernetes_to_docker/secrets_utils.py b/src/middlewared/middlewared/plugins/kubernetes_to_docker/secrets_utils.py
new file mode 100644
index 0000000000000..14a314b5b026e
--- /dev/null
+++ b/src/middlewared/middlewared/plugins/kubernetes_to_docker/secrets_utils.py
@@ -0,0 +1,66 @@
+import binascii
+import contextlib
+import gzip
+import json
+import os
+from base64 import b64decode
+
+import yaml
+
+from .yaml import SerializedDatesFullLoader
+
+
+HELM_SECRET_PREFIX = 'sh.helm.release'
+
+
+def list_secrets(secrets_dir: str) -> dict[str, dict[str, dict]]:
+ secrets = {
+ 'helm_secret': {
+ 'secret_name': None,
+ 'name': None,
+ },
+ 'release_secrets': {},
+ }
+ with os.scandir(secrets_dir) as it:
+ for entry in it:
+ if not entry.is_file():
+ continue
+
+ if entry.name.startswith(HELM_SECRET_PREFIX):
+ if secrets['helm_secret']['secret_name'] is None or entry.name > secrets['helm_secret']['secret_name']:
+ secret_contents = get_secret_contents(entry.path, True).get('release', {})
+ secrets['helm_secret'].update({
+ 'secret_name': entry.name,
+ **(secret_contents if all(
+ k in secret_contents and k for k in ('appVersion', 'config', 'name')
+ ) else {}),
+ })
+ else:
+ secrets['release_secrets'][entry.name] = get_secret_contents(entry.path)
+
+ return secrets
+
+
+def get_secret_contents(secret_path: str, helm_secret: bool = False) -> dict:
+ with open(secret_path, 'r') as f:
+ secret = yaml.load(f.read(), Loader=SerializedDatesFullLoader)
+
+ if isinstance(secret.get('data'), dict) is False:
+ return {}
+
+ contents = {}
+ for k, v in secret['data'].items():
+ with contextlib.suppress(binascii.Error, gzip.BadGzipFile, KeyError):
+ if helm_secret:
+ v = json.loads(gzip.decompress(b64decode(b64decode(v))).decode())
+ for pop_k in ('manifest', 'info', 'version', 'namespace'):
+ v.pop(pop_k)
+ chart = v.pop('chart')['metadata']
+ for add_k in ('appVersion', 'name'):
+ v[add_k] = chart[add_k]
+ else:
+ v = b64decode(v).decode()
+
+ contents[k] = v
+
+ return contents
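For reference, a standalone round-trip sketch of the encoding that get_secret_contents undoes when helm_secret=True: Helm stores the release as gzip-compressed JSON, which arrives here double base64-encoded. The release payload below is made up:

    import gzip
    import json
    from base64 import b64decode, b64encode

    release = {'name': 'demo', 'config': {'image': 'nginx'}}   # illustrative data only
    encoded = b64encode(b64encode(gzip.compress(json.dumps(release).encode())))
    decoded = json.loads(gzip.decompress(b64decode(b64decode(encoded))).decode())
    assert decoded == release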
diff --git a/src/middlewared/middlewared/plugins/kubernetes_to_docker/utils.py b/src/middlewared/middlewared/plugins/kubernetes_to_docker/utils.py
new file mode 100644
index 0000000000000..069046c1a8da2
--- /dev/null
+++ b/src/middlewared/middlewared/plugins/kubernetes_to_docker/utils.py
@@ -0,0 +1,5 @@
+import os
+
+
+def get_k8s_ds(pool_name: str) -> str:
+ return os.path.join(pool_name, 'ix-applications')
diff --git a/src/middlewared/middlewared/plugins/kubernetes_to_docker/yaml.py b/src/middlewared/middlewared/plugins/kubernetes_to_docker/yaml.py
new file mode 100644
index 0000000000000..2b48f731db981
--- /dev/null
+++ b/src/middlewared/middlewared/plugins/kubernetes_to_docker/yaml.py
@@ -0,0 +1,37 @@
+import yaml
+
+
+class SerializedDatesFullLoader(yaml.FullLoader):
+ @classmethod
+ def remove_implicit_resolver(cls, tag_to_remove):
+ """
+ Remove implicit resolvers for a particular tag
+
+ Takes care not to modify resolvers in super classes.
+
+ We want to load datetimes as strings, not dates, because we
+ go on to serialise as json, which doesn't have the advanced
+ types of yaml, and this leads to incompatibilities down the track.
+ """
+ if 'yaml_implicit_resolvers' not in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+
+ for first_letter, mappings in cls.yaml_implicit_resolvers.items():
+ cls.yaml_implicit_resolvers[first_letter] = [
+ (tag, regexp) for tag, regexp in mappings if tag != tag_to_remove
+ ]
+
+
+class SafeDumper(yaml.SafeDumper):
+ pass
+
+
+SerializedDatesFullLoader.remove_implicit_resolver('tag:yaml.org,2002:timestamp')
+
+# We customize the safe dumper here so that strings are quoted when dumped.
+# Without quotes, strings like 'y' are treated as boolean true by yaml, which means
+# helm would read 'y' as true and we would get inconsistent behaviour.
+yaml.add_representer(
+ str, lambda dumper, data: dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"'), SafeDumper
+)
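A small demonstration of why the string representer is registered on SafeDumper; the outputs in the comments are what PyYAML would be expected to emit:

    import yaml

    from middlewared.plugins.kubernetes_to_docker.yaml import SafeDumper

    # The stock dumper emits the bare string unquoted, and YAML 1.1 consumers such
    # as helm can read an unquoted y back as boolean true.
    print(yaml.dump({'enabled': 'y'}))                     # enabled: y
    # The customized SafeDumper double-quotes every string (keys included).
    print(yaml.dump({'enabled': 'y'}, Dumper=SafeDumper))  # "enabled": "y"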
diff --git a/src/middlewared/middlewared/plugins/network.py b/src/middlewared/middlewared/plugins/network.py
index 5bac389fdb42b..4486b160b4271 100644
--- a/src/middlewared/middlewared/plugins/network.py
+++ b/src/middlewared/middlewared/plugins/network.py
@@ -1922,9 +1922,9 @@ def ip_in_use(self, choices):
@private
def get_nic_names(self) -> set:
"""Get network interface names excluding internal interfaces"""
- res, ignore = set(), set(self.middleware.call_sync('interface.internal_interfaces'))
+ res, ignore = set(), tuple(self.middleware.call_sync('interface.internal_interfaces'))
with scandir('/sys/class/net/') as nics:
- for nic in filter(lambda x: x.is_symlink() and x.name not in ignore, nics):
+ for nic in filter(lambda x: x.is_symlink() and not x.name.startswith(ignore), nics):
res.add(nic.name)
return res
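The switch from exact-name matching to str.startswith with a tuple means any interface whose name begins with an internal prefix is skipped, not just exact matches. A quick illustration with hypothetical prefixes (the real ones come from interface.internal_interfaces):

    ignore = ('kube-', 'vnet', 'docker')                     # assumed prefixes, illustrative only
    nics = ['eno1', 'kube-bridge', 'vnet3', 'br0', 'docker0']
    print([n for n in nics if not n.startswith(ignore)])     # ['eno1', 'br0']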
diff --git a/src/middlewared/middlewared/plugins/pool_/dataset.py b/src/middlewared/middlewared/plugins/pool_/dataset.py
index f1d519faf6dae..5dc8a0ac1ace8 100644
--- a/src/middlewared/middlewared/plugins/pool_/dataset.py
+++ b/src/middlewared/middlewared/plugins/pool_/dataset.py
@@ -166,6 +166,7 @@ async def internal_datasets_filters(self):
['pool', '!=', await self.middleware.call('boot.pool_name')],
['id', 'rnin', '/.system'],
['id', 'rnin', '/ix-applications/'],
+ ['id', 'rnin', '/ix-apps/'],
]
@private
diff --git a/src/middlewared/middlewared/plugins/pool_/export.py b/src/middlewared/middlewared/plugins/pool_/export.py
index f694c371459b7..7ee25f76230e8 100644
--- a/src/middlewared/middlewared/plugins/pool_/export.py
+++ b/src/middlewared/middlewared/plugins/pool_/export.py
@@ -17,7 +17,7 @@ class Config:
def cleanup_after_export(self, poolinfo, opts):
try:
if all((opts['destroy'], opts['cascade'])) and (contents := os.listdir(poolinfo['path'])):
- if len(contents) == 1 and contents[0] == 'ix-applications':
+ if len(contents) == 1 and contents[0] in ('ix-applications', 'ix-apps'):
# This means:
# 1. zpool was destroyed (disks were wiped)
# 2. end-user chose to delete all share configuration associated
diff --git a/src/middlewared/middlewared/plugins/pool_/import_pool.py b/src/middlewared/middlewared/plugins/pool_/import_pool.py
index a329d27a46e84..13e4243a4d414 100644
--- a/src/middlewared/middlewared/plugins/pool_/import_pool.py
+++ b/src/middlewared/middlewared/plugins/pool_/import_pool.py
@@ -129,9 +129,10 @@ async def import_pool(self, job, data):
# Recursively reset dataset mountpoints for the zpool.
recursive = True
for child in await self.middleware.call('zfs.dataset.child_dataset_names', pool_name):
- if child == os.path.join(pool_name, 'ix-applications'):
+ if child in (os.path.join(pool_name, k) for k in ('ix-applications', 'ix-apps')):
# We exclude `ix-applications` dataset since resetting it will
# cause PVC's to not mount because "mountpoint=legacy" is expected.
+ # We exclude `ix-apps` dataset since it has a custom mountpoint in place
continue
try:
# Reset all mountpoints
diff --git a/src/middlewared/middlewared/plugins/service_/services/sssd.py b/src/middlewared/middlewared/plugins/service_/services/sssd.py
index d6ea834af0a42..01cbed67cebce 100644
--- a/src/middlewared/middlewared/plugins/service_/services/sssd.py
+++ b/src/middlewared/middlewared/plugins/service_/services/sssd.py
@@ -5,3 +5,6 @@ class SSSDService(SimpleService):
name = "sssd"
systemd_unit = "sssd"
+
+ async def before_start(self):
+ await self.middleware.call('ldap.create_sssd_dirs')
diff --git a/src/middlewared/middlewared/plugins/smb.py b/src/middlewared/middlewared/plugins/smb.py
index 752297a7aa99b..c5f8fb58ba3a9 100644
--- a/src/middlewared/middlewared/plugins/smb.py
+++ b/src/middlewared/middlewared/plugins/smb.py
@@ -624,7 +624,7 @@ async def validate_smb(self, new, verrors):
List('bindip', items=[IPAddr('ip')]),
Str('smb_options', max_length=None),
update=True,
- ))
+ ), audit='Update SMB configuration')
@pass_app(rest=True)
async def do_update(self, app, data):
"""
@@ -818,7 +818,8 @@ class Config:
List('ignore_list', default=NOT_PROVIDED)
),
register=True
- )
+ ),
+ audit='SMB share create', audit_extended=lambda data: data['name']
)
@pass_app(rest=True)
async def do_create(self, app, data):
@@ -967,17 +968,19 @@ async def apply_share_changes(self, old_is_locked, new_is_locked, oldname, newna
'sharingsmb_create',
'sharingsmb_update',
('attr', {'update': True})
- )
+ ),
+ audit='SMB share update',
+ audit_callback=True,
)
@pass_app(rest=True)
- async def do_update(self, app, id_, data):
+ async def do_update(self, app, audit_callback, id_, data):
"""
Update SMB Share of `id`.
"""
- ha_mode = SMBHAMODE[(await self.middleware.call('smb.get_smb_ha_mode'))]
+ old = await self.get_instance(id_)
+ audit_callback(old['name'])
verrors = ValidationErrors()
- old = await self.get_instance(id_)
old_audit = old['audit']
new = old.copy()
@@ -1070,13 +1073,15 @@ async def do_update(self, app, id_, data):
return await self.get_instance(id_)
- @accepts(Int('id'))
- async def do_delete(self, id_):
+ @accepts(Int('id'), audit='SMB share delete', audit_callback=True)
+ async def do_delete(self, audit_callback, id_):
"""
Delete SMB Share of `id`. This will forcibly disconnect SMB clients
that are accessing the share.
"""
share = await self.get_instance(id_)
+ audit_callback(share['name'])
+
result = await self.middleware.call('datastore.delete', self._config.datastore, id_)
share_name = 'homes' if share['home'] else share['name']
@@ -1562,7 +1567,7 @@ async def presets(self):
),
], default=[{'ae_who_sid': 'S-1-1-0', 'ae_perm': 'FULL', 'ae_type': 'ALLOWED'}]),
register=True
- ), roles=['SHARING_SMB_WRITE'])
+ ), roles=['SHARING_SMB_WRITE'], audit='Setacl SMB share', audit_extended=lambda data: data['share_name'])
@returns(Ref('smb_share_acl'))
async def setacl(self, data):
"""
@@ -1673,7 +1678,10 @@ async def setacl(self, data):
})
return await self.getacl({'share_name': data['share_name']})
- @accepts(Dict('smb_getacl', Str('share_name', required=True)), roles=['SHARING_SMB_READ'])
+ @accepts(Dict(
+ 'smb_getacl',
+ Str('share_name', required=True)
+ ), roles=['SHARING_SMB_READ'], audit='Getacl SMB share', audit_extended=lambda data: data['share_name'])
@returns(Ref('smb_share_acl'))
async def getacl(self, data):
verrors = ValidationErrors()
diff --git a/src/middlewared/middlewared/plugins/smb_/util_sd.py b/src/middlewared/middlewared/plugins/smb_/util_sd.py
index e5d34c6970b42..8cc70e8b9a2cc 100644
--- a/src/middlewared/middlewared/plugins/smb_/util_sd.py
+++ b/src/middlewared/middlewared/plugins/smb_/util_sd.py
@@ -145,6 +145,7 @@ class Config:
service = 'cifs'
service_verb = 'restart'
+ @private
@accepts(
Dict(
'get_remote_acl',
diff --git a/src/middlewared/middlewared/role.py b/src/middlewared/middlewared/role.py
index ba8b75a813936..8bc18770f38e5 100644
--- a/src/middlewared/middlewared/role.py
+++ b/src/middlewared/middlewared/role.py
@@ -2,6 +2,7 @@
from dataclasses import dataclass, field
import typing
+
@dataclass()
class Role:
"""
@@ -93,6 +94,14 @@ class Role:
'CERTIFICATE_AUTHORITY_READ': Role(),
'CERTIFICATE_AUTHORITY_WRITE': Role(includes=['CERTIFICATE_AUTHORITY_READ']),
+ # Apps roles
+ 'CATALOG_READ': Role(),
+ 'CATALOG_WRITE': Role(includes=['CATALOG_READ']),
+ 'DOCKER_READ': Role(includes=[]),
+ 'DOCKER_WRITE': Role(includes=['DOCKER_READ']),
+ 'APPS_READ': Role(includes=['CATALOG_READ']),
+ 'APPS_WRITE': Role(includes=['CATALOG_WRITE', 'APPS_READ']),
+
# iSCSI roles
'SHARING_ISCSI_AUTH_READ': Role(),
'SHARING_ISCSI_AUTH_WRITE': Role(includes=['SHARING_ISCSI_AUTH_READ']),
@@ -170,9 +179,6 @@ class Role:
'FILESYSTEM_ATTRS_WRITE',
'SERVICE_READ'],
builtin=False),
- # Apps roles
- 'CATALOG_READ': Role(),
- 'CATALOG_WRITE': Role(includes=['CATALOG_READ']),
# System settings
'SYSTEM_GENERAL_READ': Role(),
diff --git a/src/middlewared/middlewared/test/integration/assets/filesystem.py b/src/middlewared/middlewared/test/integration/assets/filesystem.py
index e0871dbd06e87..9262a0e2bbcae 100644
--- a/src/middlewared/middlewared/test/integration/assets/filesystem.py
+++ b/src/middlewared/middlewared/test/integration/assets/filesystem.py
@@ -11,3 +11,26 @@ def directory(path, options=None):
yield path
finally:
ssh(f'rm -rf {path}')
+
+
+@contextlib.contextmanager
+def mkfile(path, size=None):
+ """
+ Create a simple file
+ * path is the full path name, e.g. /mnt/tank/dataset/filename
+ * If size is None then use 'touch',
+ else create a file of size bytes filled with random data.
+ Creation will be faster if size is a power of 2, e.g. 1024 or 1048576.
+ TODO: sparse files, owner, permissions
+ """
+ try:
+ if size is None:
+ ssh(f"touch {path}")
+ else:
+ t = 1048576
+ while t > 1 and size % t != 0:
+ t = t // 2
+ ssh(f"dd if=/dev/urandom of={path} bs={t} count={size // t}")
+ yield path
+ finally:
+ ssh(f"rm -f {path}")
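A usage sketch of the new mkfile asset together with the existing directory helper, assuming an integration-test session against a machine with a pool named tank; the files and the directory are removed again when the context managers exit:

    from middlewared.test.integration.assets.filesystem import directory, mkfile

    with directory('/mnt/tank/scratch') as d:
        # One empty file and one 1 MiB file of random data.
        with mkfile(f'{d}/empty.bin'), mkfile(f'{d}/random.bin', 1048576):
            pass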
diff --git a/src/middlewared/middlewared/validators.py b/src/middlewared/middlewared/validators.py
index d4c1ee04eb911..bc70795618fbd 100644
--- a/src/middlewared/middlewared/validators.py
+++ b/src/middlewared/middlewared/validators.py
@@ -400,7 +400,6 @@ def check_path_resides_within_volume_sync(verrors, schema_name, path, vol_names)
* path is within /mnt
* path is located within one of the specified `vol_names`
* path is not explicitly a `.zfs` or `.zfs/snapshot` directory
- * path is not ix-applications dataset
"""
if path_location(path).name == 'EXTERNAL':
# There are some fields where we allow external paths
@@ -427,17 +426,3 @@ def check_path_resides_within_volume_sync(verrors, schema_name, path, vol_names)
"are not permitted paths. If a snapshot within this directory must "
"be accessed through the path-based API, then it should be called "
"directly, e.g. '/mnt/dozer/.zfs/snapshot/mysnap'.")
-
- for check_path, svc_name in (('ix-applications', 'Applications'),):
- in_use = False
- if is_mountpoint and rp.name == check_path:
- in_use = True
- else:
- # subtract 2 here to remove the '/' and 'mnt' parents
- for i in range(0, len(rp.parents) - 2):
- p = rp.parents[i]
- if p.is_mount() and p.name == check_path:
- in_use = True
- break
- if in_use:
- verrors.add(schema_name, f'A path being used by {svc_name} is not allowed')
diff --git a/tests/api2/test_005_interface.py b/tests/api2/test_005_interface.py
index d92d30769ae03..4c8c0cbd87b6e 100644
--- a/tests/api2/test_005_interface.py
+++ b/tests/api2/test_005_interface.py
@@ -124,10 +124,10 @@ def test_003_recheck_ipvx(request):
def test_004_remove_critical_failover_group(request):
with pytest.raises(ValidationErrors) as ve:
call('interface.update', interface, {'failover_group': None, 'failover_critical': True})
- assert ve.value.errors == [
- ValidationError(
- 'interface_update.failover_group',
- 'A failover group is required when configuring a critical failover interface.',
- errno.EINVAL
- )
- ]
+ assert ve.value.errors == [
+ ValidationError(
+ 'interface_update.failover_group',
+ 'A failover group is required when configuring a critical failover interface.',
+ errno.EINVAL
+ )
+ ]
diff --git a/tests/api2/test_261_iscsi_cmd.py b/tests/api2/test_261_iscsi_cmd.py
index 4fb7ac70245b5..045a927fb9d70 100644
--- a/tests/api2/test_261_iscsi_cmd.py
+++ b/tests/api2/test_261_iscsi_cmd.py
@@ -200,13 +200,23 @@ def zvol_resize(zvol, volsize):
call('pool.dataset.update', zvol, payload)
-def get_iscsi_sessions(filters=None, check_length=None):
+def _get_iscsi_sessions(filters=None):
if filters:
- data = call('iscsi.global.sessions', filters)
+ return call('iscsi.global.sessions', filters)
else:
- data = call('iscsi.global.sessions')
+ return call('iscsi.global.sessions')
+
+
+def get_iscsi_sessions(filters=None, check_length=None):
if isinstance(check_length, int):
+ for _ in range(10):
+ data = _get_iscsi_sessions(filters)
+ if len(data) == check_length:
+ return data
+ sleep(1)
assert len(data) == check_length, data
+ else:
+ data = _get_iscsi_sessions(filters)
return data
@@ -536,7 +546,6 @@ def target_test_readwrite16(ip, iqn):
with iscsi_scsi_connection(ip, iqn) as s:
TUR(s)
- s.blocksize = 512
# First let's write zeros to the first 12 blocks using WRITE SAME (16)
s.writesame16(0, 12, zeros)
@@ -561,7 +570,6 @@ def target_test_readwrite16(ip, iqn):
# Drop the iSCSI connection and login again
with iscsi_scsi_connection(ip, iqn) as s:
TUR(s)
- s.blocksize = 512
# Check results using READ (16)
for lba in range(0, 12):
@@ -763,7 +771,6 @@ def target_test_snapshot_single_login(ip, iqn, dataset_id):
with iscsi_scsi_connection(ip, iqn) as s:
TUR(s)
- s.blocksize = 512
# First let's write zeros to the first 12 blocks using WRITE SAME (16)
s.writesame16(0, 12, zeros)
@@ -834,7 +841,6 @@ def target_test_snapshot_multiple_login(ip, iqn, dataset_id):
with iscsi_scsi_connection(ip, iqn) as s:
TUR(s)
- s.blocksize = 512
# First let's write zeros to the first 12 blocks using WRITE SAME (16)
s.writesame16(0, 12, zeros)
@@ -849,7 +855,6 @@ def target_test_snapshot_multiple_login(ip, iqn, dataset_id):
with iscsi_scsi_connection(ip, iqn) as s:
TUR(s)
- s.blocksize = 512
# Now let's write DEADBEEF to a few LBAs using WRITE (16)
for lba in deadbeef_lbas:
@@ -868,7 +873,6 @@ def target_test_snapshot_multiple_login(ip, iqn, dataset_id):
with iscsi_scsi_connection(ip, iqn) as s:
TUR(s)
- s.blocksize = 512
# Do a WRITE for > 1 LBA
s.write16(10, 2, deadbeef * 2)
@@ -886,7 +890,6 @@ def target_test_snapshot_multiple_login(ip, iqn, dataset_id):
with iscsi_scsi_connection(ip, iqn) as s:
TUR(s)
- s.blocksize = 512
# Check results using READ (16)
for lba in range(0, 12):
@@ -901,7 +904,6 @@ def target_test_snapshot_multiple_login(ip, iqn, dataset_id):
with iscsi_scsi_connection(ip, iqn) as s:
TUR(s)
- s.blocksize = 512
# Check results using READ (16)
for lba in range(0, 12):
r = s.read16(lba, 1)
@@ -1465,6 +1467,37 @@ def _pr_expect_reservation_conflict(s):
raise e
+def _check_target_rw_paths(s1, s2):
+ """
+ Check that the two supplied paths can read/write data and that they point at the same LUN.
+ """
+ zeros = bytearray(512)
+ deadbeef = bytearray.fromhex('deadbeef') * 128
+ abba = bytearray.fromhex('abbaabba') * 128
+
+ # First let's write zeros to the first 12 blocks using WRITE SAME (16)
+ s1.writesame16(0, 12, zeros)
+
+ # Check results using READ (16)
+ for s in (s1, s2):
+ for lba in range(0, 12):
+ r = s.read16(lba, 1)
+ assert r.datain == zeros, r.datain
+
+ # Update some blocks from each initiator using WRITE SAME
+ s1.writesame16(0, 6, deadbeef)
+ s2.writesame16(6, 6, abba)
+
+ # Check results using READ (16)
+ for s in (s1, s2):
+ for lba in range(0, 6):
+ r = s.read16(lba, 1)
+ assert r.datain == deadbeef, r.datain
+ for lba in range(6, 12):
+ r = s.read16(lba, 1)
+ assert r.datain == abba, r.datain
+
+
def _check_persistent_reservations(s1, s2):
#
# First just do a some basic tests (register key, reserve, release, unregister key)
@@ -1592,11 +1625,9 @@ def test_18_persistent_reservation_two_initiators(request):
with configured_target_to_zvol_extent(config, target_name, zvol):
iqn = f'{basename}:{target_name}'
with iscsi_scsi_connection(truenas_server.ip, iqn) as s1:
- s1.blocksize = 512
TUR(s1)
initiator_name2 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:second"
with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_name2) as s2:
- s2.blocksize = 512
TUR(s2)
_check_persistent_reservations(s1, s2)
@@ -1730,6 +1761,17 @@ def _get_active_target_portal_group():
return None
+def _wait_for_alua_settle(retries=20):
+ print("Checking ALUA status...")
+ while retries:
+ if call('iscsi.alua.settled'):
+ print("ALUA is settled")
+ break
+ retries -= 1
+ print("Waiting for ALUA to settle")
+ sleep(5)
+
+
def _ha_reboot_master(delay=900):
"""
Reboot the MASTER node and wait for both the new MASTER
@@ -1800,15 +1842,7 @@ def _ha_reboot_master(delay=900):
raise RuntimeError('Failover never completed.')
# Finally check the ALUA status
- print("Checking ALUA status...")
- retries = 12
- while retries:
- if call('iscsi.alua.settled'):
- print("ALUA is settled")
- break
- retries -= 1
- print("Waiting for ALUA to settle")
- sleep(5)
+ _wait_for_alua_settle()
def _ensure_alua_state(state):
@@ -1821,6 +1855,13 @@ def _ensure_alua_state(state):
def test_19_alua_config(request):
"""
Test various aspects of ALUA configuration.
+
+ When run against a HA system this test will perform TWO reboots to
+ test failover with respect to iSCSI ALUA targets.
+
+ The second reboot was added to return the system to the original ACTIVE
+ node, so that subsequent tests run on the same node the previous tests
+ started on, thereby simplifying log analysis.
"""
# First ensure ALUA is off
_ensure_alua_state(False)
@@ -1848,6 +1889,7 @@ def test_19_alua_config(request):
with alua_enabled():
_ensure_alua_state(True)
+ _wait_for_alua_settle()
# We will login to the target on BOTH controllers and make sure
# we see the same target. Observe that we supply tpgs=1 as
@@ -1901,6 +1943,8 @@ def test_19_alua_config(request):
_verify_ha_report_target_port_groups(s1, tpgs, active_tpg)
_verify_ha_report_target_port_groups(s2, tpgs, active_tpg)
+ _check_target_rw_paths(s1, s2)
+
# Let's failover
_ha_reboot_master()
expect_check_condition(s1, sense_ascq_dict[0x2900]) # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
@@ -1916,6 +1960,60 @@ def test_19_alua_config(request):
_verify_ha_report_target_port_groups(s1, tpgs, new_active_tpg)
_verify_ha_report_target_port_groups(s2, tpgs, new_active_tpg)
+ _check_target_rw_paths(s1, s2)
+
+ # Create a new target
+ with configured_target_to_zvol_extent(config, f'{target_name}b', zvol) as iscsi_config2:
+ iqn2 = f'{basename}:{target_name}b'
+ api_serial_number2 = iscsi_config2['extent']['serial']
+ api_naa2 = iscsi_config2['extent']['naa']
+ tpgs2 = {
+ CONTROLLER_A_TARGET_PORT_GROUP_ID: [1, 2],
+ CONTROLLER_B_TARGET_PORT_GROUP_ID: [32001, 32002]
+ }
+ # Wait until ALUA settles, so that we know the target is available on the STANDBY node.
+ _wait_for_alua_settle()
+ # Login to the target on each controller
+ with iscsi_scsi_connection(truenas_server.nodea_ip, iqn2) as s3:
+ _verify_ha_inquiry(s3, api_serial_number2, api_naa2, 1)
+ initiator_name3 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:third"
+ with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn2, initiator_name=initiator_name3) as s4:
+ _verify_ha_inquiry(s4, api_serial_number2, api_naa2, 1)
+ _verify_ha_device_identification(s3, api_naa2, 2, CONTROLLER_A_TARGET_PORT_GROUP_ID)
+ _verify_ha_device_identification(s4, api_naa2, 32002, CONTROLLER_B_TARGET_PORT_GROUP_ID)
+ _verify_ha_report_target_port_groups(s3, tpgs2, new_active_tpg)
+ _verify_ha_report_target_port_groups(s4, tpgs2, new_active_tpg)
+ _check_target_rw_paths(s3, s4)
+
+ # Reboot again (to failback to the original ACTIVE node)
+ _ha_reboot_master()
+ for s in [s1, s2, s3, s4]:
+ expect_check_condition(s, sense_ascq_dict[0x2900]) # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
+
+ # After the 2nd reboot we will switch back to using the original active_tpg
+
+ # Check the new target again
+ _verify_ha_inquiry(s3, api_serial_number2, api_naa2, 1)
+ _verify_ha_inquiry(s4, api_serial_number2, api_naa2, 1)
+ _verify_ha_device_identification(s3, api_naa2, 2, CONTROLLER_A_TARGET_PORT_GROUP_ID)
+ _verify_ha_device_identification(s4, api_naa2, 32002, CONTROLLER_B_TARGET_PORT_GROUP_ID)
+ _verify_ha_report_target_port_groups(s3, tpgs2, active_tpg)
+ _verify_ha_report_target_port_groups(s4, tpgs2, active_tpg)
+ _check_target_rw_paths(s3, s4)
+
+ # Check the original target
+ _verify_ha_inquiry(s1, api_serial_number, api_naa, 1)
+ _verify_ha_inquiry(s2, api_serial_number, api_naa, 1)
+ _verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID)
+ _verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID)
+ _verify_ha_report_target_port_groups(s1, tpgs2, active_tpg)
+ _verify_ha_report_target_port_groups(s2, tpgs2, active_tpg)
+ _check_target_rw_paths(s1, s2)
+ # Second target has been removed again
+ _wait_for_alua_settle()
+ _verify_ha_report_target_port_groups(s1, tpgs, active_tpg)
+ _verify_ha_report_target_port_groups(s2, tpgs, active_tpg)
+
# Ensure ALUA is off again
_ensure_alua_state(False)
@@ -1977,11 +2075,9 @@ def test_21_alua_persistent_reservation_two_initiators(request):
iqn = f'{basename}:{target_name}'
# Login to the target on each controller
with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1:
- s1.blocksize = 512
TUR(s1)
initiator_name2 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:second"
with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn, initiator_name=initiator_name2) as s2:
- s2.blocksize = 512
TUR(s2)
_check_persistent_reservations(s1, s2)
# Do it all again, the other way around
@@ -2084,9 +2180,7 @@ def test_22_extended_copy(request, extent1, extent2):
with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1:
with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2:
s1.testunitready()
- s1.blocksize = 512
s2.testunitready()
- s2.blocksize = 512
_xcopy_test(s1, s2)
@@ -2110,13 +2204,9 @@ def test_23_ha_extended_copy(request, extent1, extent2):
with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn1) as sb1:
with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn2) as sb2:
sa1.testunitready()
- sa1.blocksize = 512
sa2.testunitready()
- sa2.blocksize = 512
sb1.testunitready()
- sb1.blocksize = 512
sb2.testunitready()
- sb2.blocksize = 512
_xcopy_test(sa1, sa2, sb1, sb2)
# Now re-run the test using the other controller
_xcopy_test(sb1, sb2, sa1, sa2)
@@ -2208,7 +2298,6 @@ def test_25_resize_target_zvol(request):
iqn = f'{basename}:{target_name}'
with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
TUR(s)
- s.blocksize = 512
assert MB_100 == read_capacity16(s)
# Have checked using tcpdump/wireshark that a SCSI Asynchronous Event Notification
# gets sent 0x2A09: "CAPACITY DATA HAS CHANGED"
@@ -2244,7 +2333,6 @@ def test_26_resize_target_file(request):
with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
extent_id = config['extent']['id']
TUR(s)
- s.blocksize = 512
assert MB_100 == read_capacity16(s)
file_extent_resize(extent_id, MB_256)
assert MB_256 == read_capacity16(s)
@@ -2276,7 +2364,6 @@ def test_27_initiator_group(request):
# Ensure we can access from all initiators
for initiator_iqn in [initiator_iqn1, initiator_iqn2, initiator_iqn3]:
with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
- s.blocksize = 512
TUR(s)
# Now set the initiator id to the empty (Allow All Initiators) one
@@ -2285,7 +2372,6 @@ def test_27_initiator_group(request):
set_target_initiator_id(config['target']['id'], config['initiator']['id'])
for initiator_iqn in [initiator_iqn1, initiator_iqn2, initiator_iqn3]:
with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
- s.blocksize = 512
TUR(s)
# Now create another initiator group, which contains the first two
@@ -2295,12 +2381,10 @@ def test_27_initiator_group(request):
# First two initiators can connect to the target
for initiator_iqn in [initiator_iqn1, initiator_iqn2]:
with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
- s.blocksize = 512
TUR(s)
# Third initiator cannot connect to the target
with pytest.raises(RuntimeError) as ve:
with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn3) as s:
- s.blocksize = 512
TUR(s)
assert 'Unable to connect to' in str(ve), ve
# Clear it again
@@ -2308,7 +2392,6 @@ def test_27_initiator_group(request):
for initiator_iqn in [initiator_iqn1, initiator_iqn2, initiator_iqn3]:
with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
- s.blocksize = 512
TUR(s)
@@ -2326,7 +2409,6 @@ def test_28_portal_access(request):
with configured_target_to_zvol_extent(config1, target_name, zvol, volsize=MB_100):
with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
TUR(s)
- s.blocksize = 512
assert MB_100 == read_capacity16(s)
# Now, if we are in a HA config turn on ALUA and test
# the specific IP addresses
@@ -2336,18 +2418,15 @@ def test_28_portal_access(request):
with pytest.raises(RuntimeError) as ve:
with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
- s.blocksize = 512
TUR(s)
assert 'Unable to connect to' in str(ve), ve
with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s:
TUR(s)
- s.blocksize = 512
assert MB_100 == read_capacity16(s)
with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s:
TUR(s)
- s.blocksize = 512
assert MB_100 == read_capacity16(s)
@@ -2372,11 +2451,9 @@ def test_29_multiple_extents():
with target_extent_associate(target_id, extent2_config['id'], 1):
with iscsi_scsi_connection(truenas_server.ip, iqn, 0) as s:
TUR(s)
- s.blocksize = 512
assert MB_100 == read_capacity16(s)
with iscsi_scsi_connection(truenas_server.ip, iqn, 1) as s:
TUR(s)
- s.blocksize = 512
assert MB_256 == read_capacity16(s)
# Now try to create another extent using the same serial number
diff --git a/tests/api2/test_300_nfs.py b/tests/api2/test_300_nfs.py
index c4b46056e3a87..50f0dfcaca54b 100644
--- a/tests/api2/test_300_nfs.py
+++ b/tests/api2/test_300_nfs.py
@@ -1459,18 +1459,24 @@ def test_48_syslog_filters():
# Add dummy entries to avoid false positives
for i in range(10):
ssh(f'logger "====== {i}: NFS test_48_syslog_filters (with) ======"')
+
+ # The mount messages can take over a minute to appear in syslog, or they can
+ # show up almost immediately (syslog is already configured to flush promptly).
+ # This retry loop prevents false failures in the slow case without adding a
+ # time penalty in the quick case.
with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, user=user, password=password, ip=truenas_server.ip):
- num_tries = 10
+ # Increase num_tries if necessary
+ num_tries = tries_remaining = 12
found = False
res = ""
- while not found and num_tries > 0:
- numlines = 3 * (10 - num_tries) + 5
- res = ssh(f"tail -{numlines} /var/log/syslog")
+ while not found and tries_remaining > 0:
+ res = ssh("tail -30 /var/log/syslog")
if "rpc.mountd" in res:
found = True
break
- num_tries -= 1
- sleep(10 - num_tries)
+ tries_remaining -= 1
+ sleep(num_tries - tries_remaining)
assert found, f"Expected to find 'rpc.mountd' in the output but found:\n{res}"
@@ -1630,6 +1636,8 @@ def test_52_manage_gids(state, expected):
if state is not None:
sleep(3) # In Cobia: Prevent restarting NFS too quickly.
call("nfs.update", {"userd_manage_gids": state})
+ # Allow config file to be updated
+ sleep(1)
s = parse_server_config()
assert s['mountd']['manage-gids'] == expected, str(s)
@@ -1655,6 +1663,8 @@ def test_54_v4_domain():
# Make a setting change and confirm
db = call('nfs.update', {"v4_domain": "ixsystems.com"})
assert db['v4_domain'] == 'ixsystems.com', f"v4_domain failed to be updated in nfs DB: {db}"
+ # Allow config file to be updated
+ sleep(1)
s = parse_server_config("idmapd")
assert s['General'].get('Domain') == 'ixsystems.com', f"'Domain' failed to be updated in idmapd.conf: {s}"
diff --git a/tests/api2/test_440_snmp.py b/tests/api2/test_440_snmp.py
index 4ce613203995a..eaba458e119dd 100644
--- a/tests/api2/test_440_snmp.py
+++ b/tests/api2/test_440_snmp.py
@@ -6,7 +6,10 @@
from time import sleep
+from contextlib import ExitStack
from middlewared.service_exception import ValidationErrors
+from middlewared.test.integration.assets.pool import dataset, snapshot
+from middlewared.test.integration.assets.filesystem import directory, mkfile
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.utils.client import truenas_server
from middlewared.test.integration.utils.system import reset_systemd_svcs
@@ -14,7 +17,7 @@
ObjectType, SnmpEngine, UdpTransportTarget, getCmd)
-from auto_config import ha, interface, password, user
+from auto_config import ha, interface, password, user, pool_name
from functions import async_SSH_done, async_SSH_start
skip_ha_tests = pytest.mark.skipif(not (ha and "virtual_ip" in os.environ), reason="Skip HA tests")
@@ -97,6 +100,68 @@ def add_SNMPv3_user():
yield
+@pytest.fixture(scope='function')
+def create_nested_structure():
+ """
+ Create the following structure:
+ tank -+-> dataset_1 -+-> dataset_2 -+-> dataset_3
+ |-> zvol_1a |-> zvol-L_2a |-> zvol L_3a
+ |-> zvol_1b |-> zvol-L_2b |-> zvol L_3b
+ |-> file_1 |-> file_2 |-> file_3
+ |-> dir_1 |-> dir_2 |-> dir_3
+ TODO: Make this generic and move to assets
+ """
+ ds_path = ""
+ ds_list = []
+ zv_list = []
+ dir_list = []
+ file_list = []
+ # Test '-' and ' ' in the name (we skip index 0)
+ zvol_name = ["bogus", "zvol", "zvol-L", "zvol L"]
+ with ExitStack() as es:
+
+ for i in range(1, 4):
+ preamble = f"{ds_path + '/' if i > 1 else ''}"
+ vol_path = f"{preamble}{zvol_name[i]}_{i}"
+
+ # Create zvols
+ for c in crange('a', 'b'):
+ zv = es.enter_context(dataset(vol_path + c, {"type": "VOLUME", "volsize": 1048576}))
+ zv_list.append(zv)
+
+ # Create directories
+ d = es.enter_context(directory(f"/mnt/{pool_name}/{preamble}dir_{i}"))
+ dir_list.append(d)
+
+ # Create files
+ f = es.enter_context(mkfile(f"/mnt/{pool_name}/{preamble}file_{i}", 1048576))
+ file_list.append(f)
+
+ # Create datasets
+ ds_path += f"{'/' if i > 1 else ''}dataset_{i}"
+ ds = es.enter_context(dataset(ds_path))
+ ds_list.append(ds)
+
+ yield {'zv': zv_list, 'ds': ds_list, 'dir': dir_list, 'file': file_list}
+
+
+def crange(c1, c2):
+ """
+ Generates the characters from `c1` to `c2`, inclusive.
+ Simple lowercase ascii only.
+ NOTE: Intended for test use only; not safe for runtime code.
+ """
+ ord_a = 97
+ ord_z = 122
+ c1_ord = ord(c1)
+ c2_ord = ord(c2)
+ assert c1_ord < c2_ord, f"'{c1}' must be 'less than' '{c2}'"
+ assert ord_a <= c1_ord <= ord_z
+ assert ord_a <= c2_ord <= ord_z
+ for c in range(c1_ord, c2_ord + 1):
+ yield chr(c)
+
+
def get_systemctl_status(service):
""" Return 'RUNNING' or 'STOPPED' """
try:
@@ -170,14 +235,28 @@ def user_list_users(snmp_config):
# This call will timeout if SNMP is not running
res = ssh(cmd)
- return [x.split()[-1].strip('\"') for x in res.splitlines()]
+ return [x.split(':')[-1].strip(' \"') for x in res.splitlines()]
+
+
+def v2c_snmpwalk(mib):
+ """
+ Run snmpwalk with the v2c protocol.
+ mib is the item to be gathered. mib format examples:
+ iso.3.6.1.6.3.15.1.2.2.1.3
+ 1.3.6.1.4.1.50536.1.2
+ """
+ cmd = f"snmpwalk -v2c -cpublic localhost {mib}"
+
+ # This call will timeout if SNMP is not running
+ res = ssh(cmd)
+ return [x.split(':')[-1].strip(' \"') for x in res.splitlines()]
# =====================================================================
# Tests
# =====================================================================
-@pytest.mark.usefixtures("initialize_and_start_snmp")
class TestSNMP:
+
def test_configure_SNMP(self, initialize_and_start_snmp):
config = initialize_and_start_snmp
@@ -349,3 +428,18 @@ def test_SNMPv3_user_delete(self):
with pytest.raises(Exception) as ve:
res = user_list_users(SNMP_USER_CONFIG)
assert "Unknown user name" in str(ve.value)
+
+ def test_zvol_reporting(self, create_nested_structure):
+ """
+ The TrueNAS snmp agent should list every zvol, regardless of which ZFS
+ pool or dataset it was created on.
+ snmpwalk -v2c -cpublic localhost 1.3.6.1.4.1.50536.1.2.1.1.2
+ """
+ # The expectation is that the snmp agent should list exactly the six zvols.
+ created_items = create_nested_structure
+
+ # Include a snapshot of one of the zvols
+ with snapshot(created_items['zv'][0], "snmpsnap01"):
+ snmp_res = v2c_snmpwalk('1.3.6.1.4.1.50536.1.2.1.1.2')
+ assert all(v in created_items['zv'] for v in snmp_res), f"expected {created_items['zv']}, but found {snmp_res}"
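The value extraction in v2c_snmpwalk keeps only the part after the last colon of each varbind line; a quick illustration on a made-up snmpwalk output line:

    line = 'iso.3.6.1.4.1.50536.1.2.1.1.2.1 = STRING: "tank/zvol L_3a"'   # sample line, not real output
    print(line.split(':')[-1].strip(' "'))                                # tank/zvol L_3a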
diff --git a/tests/api2/test_audit_smb.py b/tests/api2/test_audit_smb.py
new file mode 100644
index 0000000000000..e12ba132a6953
--- /dev/null
+++ b/tests/api2/test_audit_smb.py
@@ -0,0 +1,83 @@
+import os
+import sys
+
+import pytest
+from middlewared.service_exception import CallError
+from middlewared.test.integration.assets.pool import dataset
+from middlewared.test.integration.utils import call
+from middlewared.test.integration.utils.audit import expect_audit_method_calls
+
+sys.path.append(os.getcwd())
+
+REDACTED_SECRET = '********'
+
+
+@pytest.fixture(scope='module')
+def smb_audit_dataset(request):
+ with dataset('audit-test-smb') as ds:
+ try:
+ yield ds
+ finally:
+ pass
+
+
+def test_smb_update_audit():
+ '''
+ Test the auditing of SMB configuration changes
+ '''
+ initial_smb_config = call('smb.config')
+ payload = {'enable_smb1': True}
+ try:
+ with expect_audit_method_calls([{
+ 'method': 'smb.update',
+ 'params': [payload],
+ 'description': 'Update SMB configuration',
+ }]):
+ call('smb.update', payload)
+ finally:
+ call('smb.update', {'enable_smb1': False})
+
+
+def test_smb_share_audit(smb_audit_dataset):
+ '''
+ Test the auditing of SMB share operations
+ '''
+ smb_share_path = os.path.join('/mnt', smb_audit_dataset)
+ share_config = None  # ensure the finally block below cannot hit a NameError if create fails
+ try:
+ # CREATE
+ payload = {
+ "comment": "My Test Share",
+ "path": smb_share_path,
+ "name": "audit_share"
+ }
+ with expect_audit_method_calls([{
+ 'method': 'sharing.smb.create',
+ 'params': [payload],
+ 'description': 'SMB share create audit_share',
+ }]):
+ share_config = call('sharing.smb.create', payload)
+
+ # UPDATE
+ payload = {
+ "ro": True
+ }
+ with expect_audit_method_calls([{
+ 'method': 'sharing.smb.update',
+ 'params': [
+ share_config['id'],
+ payload,
+ ],
+ 'description': 'SMB share update audit_share',
+ }]):
+ share_config = call('sharing.smb.update', share_config['id'], payload)
+
+ finally:
+ if share_config is not None:
+ # DELETE
+ share_id = share_config['id']
+ with expect_audit_method_calls([{
+ 'method': 'sharing.smb.delete',
+ 'params': [share_id],
+ 'description': 'SMB share delete audit_share',
+ }]):
+ call('sharing.smb.delete', share_id)
diff --git a/tests/api2/test_pool_replace_disk.py b/tests/api2/test_pool_replace_disk.py
index a2c933403c8ef..eefa3ef79c829 100644
--- a/tests/api2/test_pool_replace_disk.py
+++ b/tests/api2/test_pool_replace_disk.py
@@ -43,5 +43,25 @@ def test_pool_replace_disk(topology, i):
assert len(disks(pool["topology"])) == count
assert disks(pool["topology"])[i]["disk"] == new_disk["devname"]
- assert call("disk.get_instance", new_disk["identifier"], {"extra": {"pools": True}})["pool"] == pool["name"]
- assert call("disk.get_instance", to_replace_disk["identifier"], {"extra": {"pools": True}})["pool"] is None
+ # This is flaky on our VMs as well, so give it a bit of time before we assert
+ new = to_replace = None
+ for _ in range(30):
+ if all((new, to_replace)):
+ break
+ elif new is None:
+ p = call("disk.get_instance", new_disk["identifier"], {"extra": {"pools": True}})
+ if p["pool"] == pool["name"]:
+ new = True
+ else:
+ sleep(1)
+ elif to_replace is None:
+ t = call("disk.get_instance", to_replace_disk["identifier"], {"extra": {"pools": True}})
+ if t["pool"] is None:
+ to_replace = True
+ else:
+ sleep(1)
+ else:
+ if new is None:
+ assert False, f'disk.get_instance failed on {new_disk["identifier"]!r}'
+ if to_replace is None:
+ assert False, f'disk.get_instance failed on {to_replace_disk["identifier"]!r}'
diff --git a/tests/protocols/iscsi_proto.py b/tests/protocols/iscsi_proto.py
index f21d4b7f15f88..6a4a705c53a4f 100644
--- a/tests/protocols/iscsi_proto.py
+++ b/tests/protocols/iscsi_proto.py
@@ -49,6 +49,7 @@ def iscsi_scsi_connect(host, iqn, lun=0, user=None, secret=None, target_user=Non
else:
device = init_device(device_str)
s = SCSI(device)
+ s.blocksize = 512
return s