diff --git a/debian/debian/ix-freebsd-to-scale-update.service b/debian/debian/ix-freebsd-to-scale-update.service
deleted file mode 100644
index d189b90bb4e62..0000000000000
--- a/debian/debian/ix-freebsd-to-scale-update.service
+++ /dev/null
@@ -1,25 +0,0 @@
-[Unit]
-Description=Update TrueNAS 12 to SCALE
-DefaultDependencies=no
-
-Before=network-pre.target
-
-After=middlewared.service
-Before=ix-etc.service
-Before=ix-netif.service
-Before=ix-preinit.service
-Before=ix-syncdisks.service
-Before=ix-zfs.service
-Before=local-fs.target
-
-ConditionPathExists=/data/freebsd-to-scale-update
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-TimeoutStartSec=300
-ExecStart=midclt call --job update.freebsd_to_scale
-StandardOutput=null
-
-[Install]
-WantedBy=multi-user.target
diff --git a/debian/debian/rules b/debian/debian/rules
index ce0992820b0ff..5b9950cd0c444 100755
--- a/debian/debian/rules
+++ b/debian/debian/rules
@@ -12,7 +12,6 @@ override_dh_installsystemd:
dh_installsystemd --no-start -r --no-restart-after-upgrade --name=ix-boot-core
dh_installsystemd --no-start -r --no-restart-after-upgrade --name=ix-cgroups
dh_installsystemd --no-start -r --no-restart-after-upgrade --name=ix-etc
- dh_installsystemd --no-start -r --no-restart-after-upgrade --name=ix-freebsd-to-scale-update
dh_installsystemd --no-start -r --no-restart-after-upgrade --name=ix-netif
dh_installsystemd --no-start -r --no-restart-after-upgrade --name=ix-postinit
dh_installsystemd --no-start -r --no-restart-after-upgrade --name=ix-preinit
diff --git a/src/freenas/etc/modprobe.d/blacklist-qla2xxx.conf b/src/freenas/etc/modprobe.d/blacklist-qla2xxx.conf
new file mode 100644
index 0000000000000..634895c6d51f3
--- /dev/null
+++ b/src/freenas/etc/modprobe.d/blacklist-qla2xxx.conf
@@ -0,0 +1 @@
+blacklist qla2xxx
diff --git a/src/freenas/etc/modprobe.d/qla2xxx_scst.conf b/src/freenas/etc/modprobe.d/qla2xxx_scst.conf
new file mode 100644
index 0000000000000..f4c4bb036a42b
--- /dev/null
+++ b/src/freenas/etc/modprobe.d/qla2xxx_scst.conf
@@ -0,0 +1 @@
+options qla2xxx_scst qlini_mode=disabled
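Taken together, these two modprobe fragments pin the Fibre Channel stack: the in-tree qla2xxx initiator driver is blacklisted, and the SCST fork is forced to load with initiator mode disabled (target-only). A quick post-boot sanity check might look like this — a sketch, assuming the module parameter is exported through sysfs as usual:

```python
from pathlib import Path

# Hypothetical check: the SCST fork should be loaded with qlini_mode=disabled,
# while the blacklisted in-tree driver should be absent.
param = Path('/sys/module/qla2xxx_scst/parameters/qlini_mode')
if param.exists():
    print('qlini_mode =', param.read_text().strip())  # expect: disabled
if Path('/sys/module/qla2xxx').exists():
    print('warning: in-tree qla2xxx loaded despite the blacklist')
```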
diff --git a/src/freenas/usr/local/libexec/disable-rootfs-protection b/src/freenas/usr/local/libexec/disable-rootfs-protection
index 62a9388212aa1..ed39051ee3f31 100755
--- a/src/freenas/usr/local/libexec/disable-rootfs-protection
+++ b/src/freenas/usr/local/libexec/disable-rootfs-protection
@@ -9,6 +9,8 @@ from truenas_api_client import Client
from pathlib import Path
from subprocess import run
+from middlewared.utils import ProductType
+
ZFS_CMD = '/usr/sbin/zfs'
TO_CHMOD = ['apt', 'dpkg']
@@ -91,11 +93,11 @@ if __name__ == '__main__':
# environment.
if rv.stdout.decode().strip() != 'on' and not args.force:
with Client() as c:
- if c.call('system.product_type') == 'SCALE_ENTERPRISE':
+ if c.call('system.product_type') == ProductType.SCALE_ENTERPRISE:
print((
- 'Root filesystem protections may not be administratively disabled '
- 'on Enterprise-licensed TrueNAS products. Circumventing this '
- 'restriction is considered an unsupported configuration.'
+ 'Root filesystem protections may not be administratively disabled '
+ 'on Enterprise-licensed TrueNAS products. Circumventing this '
+ 'restriction is considered an unsupported configuration.'
))
sys.exit(1)
try:
diff --git a/src/middlewared/middlewared/alembic/versions/24.10/2024-09-03_20-33_docker_addr_pool.py b/src/middlewared/middlewared/alembic/versions/24.10/2024-09-03_20-33_docker_addr_pool.py
index 0087938584026..0f32c615e8d8d 100644
--- a/src/middlewared/middlewared/alembic/versions/24.10/2024-09-03_20-33_docker_addr_pool.py
+++ b/src/middlewared/middlewared/alembic/versions/24.10/2024-09-03_20-33_docker_addr_pool.py
@@ -22,7 +22,7 @@ def upgrade():
'address_pools',
sa.TEXT(),
nullable=False,
- server_default='[{"base": "172.30.0.0/16", "size": 27}, {"base": "172.31.0.0/16", "size": 27}]'
+ server_default='[{"base": "172.17.0.0/12", "size": 24}]'
)
)
diff --git a/src/middlewared/middlewared/alembic/versions/24.10/2024-10-03_20-46_docker_address_pool.py b/src/middlewared/middlewared/alembic/versions/24.10/2024-10-03_20-46_docker_address_pool.py
new file mode 100644
index 0000000000000..29cca38de18a7
--- /dev/null
+++ b/src/middlewared/middlewared/alembic/versions/24.10/2024-10-03_20-46_docker_address_pool.py
@@ -0,0 +1,35 @@
+"""
+Docker address pool default updated
+
+Revision ID: 92b98613c498
+Revises: c31881e67797
+Create Date: 2024-10-03 20:46:17.935672+00:00
+"""
+import json
+
+from alembic import op
+
+
+revision = '92b98613c498'
+down_revision = 'c31881e67797'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ conn = op.get_bind()
+
+ if docker_config := list(map(
+ dict, conn.execute('SELECT * FROM services_docker').fetchall()
+ )):
+ docker_config = docker_config[0]
+ address_pool_config = json.loads(docker_config['address_pools'])
+
+ if address_pool_config == [{'base': '172.30.0.0/16', 'size': 27}, {'base': '172.31.0.0/16', 'size': 27}]:
+ conn.execute("UPDATE services_docker SET address_pools = ? WHERE id = ?", [json.dumps(
+ [{"base": "172.17.0.0/12", "size": 24}]
+ ), docker_config['id']])
+
+
+def downgrade():
+ pass
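The 2024-09-03 migration above only changes the server_default, which affects fresh databases; this follow-up rewrites existing rows, and only when they still hold the old default, so user-customized pools are left alone. Capacity-wise the swap is close to neutral, as this ipaddress sketch shows (strict=False because 172.17.0.0/12 is not aligned to its prefix; Docker accepts that, but ip_network rejects it by default):

```python
import ipaddress

old = [('172.30.0.0/16', 27), ('172.31.0.0/16', 27)]
new = [('172.17.0.0/12', 24)]

for label, pools in (('old', old), ('new', new)):
    nets = sum(
        2 ** (size - ipaddress.ip_network(base, strict=False).prefixlen)
        for base, size in pools
    )
    print(label, nets, 'subnets')
# old: 4096 /27 networks (30 usable hosts each)
# new: 4096 /24 networks (254 usable hosts each)
```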
diff --git a/src/middlewared/middlewared/alembic/versions/25.04/2024-09-25_13-52_remove_truenas_customerinformation.py b/src/middlewared/middlewared/alembic/versions/25.04/2024-09-25_13-52_remove_truenas_customerinformation.py
new file mode 100644
index 0000000000000..e9b06cc9cee3b
--- /dev/null
+++ b/src/middlewared/middlewared/alembic/versions/25.04/2024-09-25_13-52_remove_truenas_customerinformation.py
@@ -0,0 +1,35 @@
+"""Remove TrueNAS customer information
+
+Revision ID: f449b425ad89
+Revises: 6dedf12c1035
+Create Date: 2024-09-25 13:52:07.834992+00:00
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = 'f449b425ad89'
+down_revision = '6dedf12c1035'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('truenas_customerinformation')
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('truenas_customerinformation',
+ sa.Column('id', sa.INTEGER(), nullable=False),
+ sa.Column('data', sa.TEXT(), nullable=False),
+ sa.Column('updated_at', sa.DATETIME(), nullable=False),
+ sa.Column('sent_at', sa.DATETIME(), nullable=True),
+ sa.Column('form_dismissed', sa.BOOLEAN(), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ # ### end Alembic commands ###
diff --git a/src/middlewared/middlewared/alembic/versions/25.04/2024-10-04_00-45_merge.py b/src/middlewared/middlewared/alembic/versions/25.04/2024-10-04_00-45_merge.py
new file mode 100644
index 0000000000000..cf24730e19a40
--- /dev/null
+++ b/src/middlewared/middlewared/alembic/versions/25.04/2024-10-04_00-45_merge.py
@@ -0,0 +1,19 @@
+"""Merge
+
+Revision ID: 5fe28eada969
+Revises: f449b425ad89, 92b98613c498
+Create Date: 2024-10-04 00:45:59.547731+00:00
+"""
+
+revision = '5fe28eada969'
+down_revision = ('f449b425ad89', '92b98613c498')
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ pass
+
+
+def downgrade():
+ pass
diff --git a/src/middlewared/middlewared/alert/base.py b/src/middlewared/middlewared/alert/base.py
index 5694d99f89a84..3c28e090bfcb1 100644
--- a/src/middlewared/middlewared/alert/base.py
+++ b/src/middlewared/middlewared/alert/base.py
@@ -7,7 +7,7 @@
import html2text
from middlewared.alert.schedule import IntervalSchedule
-from middlewared.plugins.system.product import PRODUCT_NAME
+from middlewared.utils import ProductName, ProductType
from middlewared.utils.lang import undefined
__all__ = ["UnavailableException",
@@ -72,7 +72,7 @@ class AlertClass(metaclass=AlertClassMeta):
text = None
exclude_from_list = False
- products = ("CORE", "ENTERPRISE", "SCALE", "SCALE_ENTERPRISE")
+ products = ("CORE", "ENTERPRISE", ProductType.SCALE, ProductType.SCALE_ENTERPRISE)
proactive_support = False
proactive_support_notify_gone = False
@@ -311,7 +311,7 @@ class AlertSource:
schedule = IntervalSchedule(timedelta())
- products = ("CORE", "ENTERPRISE", "SCALE", "SCALE_ENTERPRISE")
+ products = ("CORE", "ENTERPRISE", ProductType.SCALE, ProductType.SCALE_ENTERPRISE)
failover_related = False
run_on_backup_node = True
@@ -376,7 +376,7 @@ async def _format_alerts(self, alerts, gone_alerts, new_alerts):
else:
node_map = None
- html = format_alerts(PRODUCT_NAME, hostname, node_map, alerts, gone_alerts, new_alerts)
+ html = format_alerts(ProductName.PRODUCT_NAME, hostname, node_map, alerts, gone_alerts, new_alerts)
if self.html:
return html
@@ -397,7 +397,7 @@ def _format_alerts(self, alerts, gone_alerts, new_alerts):
node_map = self.middleware.call_sync("alert.node_map")
else:
node_map = None
- return format_alerts(PRODUCT_NAME, hostname, node_map, alerts, gone_alerts, new_alerts)
+ return format_alerts(ProductName.PRODUCT_NAME, hostname, node_map, alerts, gone_alerts, new_alerts)
class ProThreadedAlertService(ThreadedAlertService):
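This file and the string-to-constant swaps in the alert sources below are safe churn only because the new attributes compare equal to the old literals — roughly this shape (a sketch of the assumption, not the actual middlewared.utils definition):

```python
# Assumed: ProductType exposes str-valued constants, so existing tuple
# membership tests and comparisons against raw strings keep working.
class ProductType:
    SCALE = 'SCALE'
    SCALE_ENTERPRISE = 'SCALE_ENTERPRISE'

assert ProductType.SCALE_ENTERPRISE == 'SCALE_ENTERPRISE'
assert 'SCALE' in ('CORE', 'ENTERPRISE', ProductType.SCALE)
```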
diff --git a/src/middlewared/middlewared/alert/source/auth.py b/src/middlewared/middlewared/alert/source/auth.py
index 03297fe0209b7..41a1f356d04cd 100644
--- a/src/middlewared/middlewared/alert/source/auth.py
+++ b/src/middlewared/middlewared/alert/source/auth.py
@@ -1,5 +1,6 @@
from middlewared.alert.base import Alert, AlertCategory, AlertClass, AlertLevel, AlertSource
from middlewared.alert.schedule import CrontabSchedule
+from middlewared.utils import ProductType
from middlewared.utils.audit import UNAUTHENTICATED
from time import time
@@ -38,7 +39,7 @@ def audit_entry_to_msg(entry):
class AdminSessionAlertSource(AlertSource):
schedule = CrontabSchedule(hour=1) # every 24 hours
run_on_backup_node = True
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
async def check(self):
now = int(time())
diff --git a/src/middlewared/middlewared/alert/source/enclosure_status.py b/src/middlewared/middlewared/alert/source/enclosure_status.py
index c5e973efd2f2e..4de9e38009716 100644
--- a/src/middlewared/middlewared/alert/source/enclosure_status.py
+++ b/src/middlewared/middlewared/alert/source/enclosure_status.py
@@ -2,16 +2,36 @@
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
+from dataclasses import dataclass
+from middlewared.utils import ProductType
-from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
+from middlewared.alert.base import (
+ AlertClass,
+ AlertCategory,
+ AlertLevel,
+ Alert,
+ AlertSource,
+)
+
+
+@dataclass(slots=True, frozen=True, kw_only=True)
+class BadElement:
+ enc_name: str
+ descriptor: str
+ status: str
+ value: str
+ value_raw: int
+
+ def args(self):
+ return [self.enc_name, self.descriptor, self.status, self.value, self.value_raw]
class EnclosureUnhealthyAlertClass(AlertClass):
category = AlertCategory.HARDWARE
level = AlertLevel.CRITICAL
title = "Enclosure Status Is Not Healthy"
- text = "Enclosure (%s): Element \"%s\" is reporting a status of \"%s\" with a value of \"%s\". (raw value \"%s\")"
- products = ("SCALE_ENTERPRISE",)
+ text = 'Enclosure (%s): Element "%s" is reporting a status of "%s" with a value of "%s". (raw value "%s")'
+ products = (ProductType.SCALE_ENTERPRISE,)
class EnclosureHealthyAlertClass(AlertClass):
@@ -19,72 +39,69 @@ class EnclosureHealthyAlertClass(AlertClass):
level = AlertLevel.INFO
title = "Enclosure Status Is Healthy"
text = "Enclosure (%s) is healthy."
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
class EnclosureStatusAlertSource(AlertSource):
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
failover_related = True
run_on_backup_node = False
- bad = ('critical', 'noncritical', 'unknown', 'unrecoverable')
- bad_elements = []
-
- async def should_report(self, enclosure, element):
- should_report = True
- if element['status'].lower() in self.bad and element['value'] != 'None':
- if element['name'] == 'Enclosure':
- # this is an element that provides an "overview" for all the other elements
- # i.e. if a power supply element is reporting critical, this will (should)
- # report critical as well. Sometimes, however, this will constantly report
- # a bad status, just ignore it #11918
- should_report = False
- elif enclosure['name'] == 'ECStream 3U16+4R-4X6G.3 d10c' and element['descriptor'] == '1.8V Sensor':
- # The 1.8V sensor is bugged on the echostream enclosure (Z-series). The
- # management chip loses its mind and claims undervoltage, but scoping
- # this confirms the voltage is fine. Ignore alerts from this element. #10077
- should_report = False
- else:
- should_report = False
-
- return should_report
+ bad = ("critical", "noncritical", "unknown", "unrecoverable")
+ bad_elements: list[tuple[BadElement, int]] = list()
+
+ async def should_report(self, ele_type: str, ele_value: dict):
+ """We only want to raise an alert for an element's status
+ if it meets certain criteria"""
+ if not ele_value["value"]:
+ # if we don't have an actual value, it doesn't
+ # matter what status the element is reporting;
+ # we'll skip it so we don't raise an alarm to
+ # the end-user unnecessarily
+ return False
+ elif ele_value["status"].lower() not in self.bad:
+ return False
+
+ return True
async def check(self):
- good_enclosures = []
- bad_elements = []
- for enc in await self.middleware.call('enclosure.query'):
- good_enclosures.append([enc['name']])
-
- for element_values in enc['elements']:
- for value in element_values['elements']:
- if await self.should_report(enc, value):
- args = [
- enc['name'],
- value['name'],
- value['status'],
- value['value'],
- value['value_raw']
- ]
- for i, (another_args, count) in enumerate(self.bad_elements):
- if another_args == args:
- bad_elements.append((args, count + 1))
+ good_enclosures, bad_elements = [], []
+ for enc in await self.middleware.call("enclosure2.query"):
+ good_enclosures.append([f"{enc['name']} (id: {enc['id']})"])
+ enc["elements"].pop("Array Device Slot") # dont care about disk slots
+ for element_type, element_values in enc["elements"].items():
+ for ele_value in element_values.values():
+ if await self.should_report(element_type, ele_value):
+ current_bad_element = BadElement(
+ enc_name=f"{enc['name']} (id: {enc['id']})",
+ descriptor=ele_value["descriptor"],
+ status=ele_value["status"],
+ value=ele_value["value"],
+ value_raw=ele_value["value_raw"],
+ )
+ for previous_bad_element, count in self.bad_elements:
+ if previous_bad_element == current_bad_element:
+ bad_elements.append((current_bad_element, count + 1))
break
else:
- bad_elements.append((args, 1))
+ bad_elements.append((current_bad_element, 1))
self.bad_elements = bad_elements
alerts = []
- for args, count in bad_elements:
- # We only report unhealthy enclosure elements if they were unhealthy 5 probes in a row (1 probe = 1 minute)
+ for current_bad_element, count in bad_elements:
+ # We only report unhealthy enclosure elements if
+ # they were unhealthy 5 probes in a row (1 probe = 1 minute)
if count >= 5:
try:
- good_enclosures.remove(args[:1])
+ good_enclosures.remove([current_bad_element.enc_name])
except ValueError:
pass
- alerts.append(Alert(EnclosureUnhealthyAlertClass, args=args))
+ alerts.append(
+ Alert(EnclosureUnhealthyAlertClass, args=current_bad_element.args())
+ )
- for args in good_enclosures:
- alerts.append(Alert(EnclosureHealthyAlertClass, args=args))
+ for enclosure in good_enclosures:
+ alerts.append(Alert(EnclosureHealthyAlertClass, args=enclosure))
return alerts
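The move from ad-hoc arg lists to a frozen dataclass is what makes the five-probe debounce reliable: BadElement compares (and hashes) by field values, so a recurring fault matches its entry from the previous check() run and the counter climbs. A toy reproduction of that mechanism:

```python
from dataclasses import dataclass

@dataclass(slots=True, frozen=True, kw_only=True)
class BadElement:
    enc_name: str
    descriptor: str
    status: str
    value: str
    value_raw: int

counts: dict[BadElement, int] = {}
for probe in range(5):  # 1 probe per minute in the real source
    seen = BadElement(enc_name='enc0', descriptor='PSU1',
                      status='critical', value='fail', value_raw=2)
    counts[seen] = counts.get(seen, 0) + 1  # equality/hash by field values

assert counts[seen] == 5  # threshold at which the alert is raised
```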
diff --git a/src/middlewared/middlewared/alert/source/failover.py b/src/middlewared/middlewared/alert/source/failover.py
index ffd24a043ec67..79d967ef0a8f3 100644
--- a/src/middlewared/middlewared/alert/source/failover.py
+++ b/src/middlewared/middlewared/alert/source/failover.py
@@ -6,6 +6,7 @@
import errno
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource, UnavailableException
+from middlewared.utils import ProductType
from middlewared.service_exception import CallError
@@ -14,7 +15,7 @@ class FailoverInterfaceNotFoundAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'Failover Internal Interface Not Found'
text = 'Failover internal interface not found. Contact support.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class TrueNASVersionsMismatchAlertClass(AlertClass):
@@ -22,7 +23,7 @@ class TrueNASVersionsMismatchAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'TrueNAS Software Versions Must Match Between Storage Controllers'
text = 'TrueNAS software versions must match between storage controllers.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class FailoverStatusCheckFailedAlertClass(AlertClass):
@@ -30,7 +31,7 @@ class FailoverStatusCheckFailedAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'Failed to Check Failover Status with the Other Controller'
text = 'Failed to check failover status with the other controller: %s.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class FailoverFailedAlertClass(AlertClass):
@@ -38,7 +39,7 @@ class FailoverFailedAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'Failover Failed'
text = 'Failover failed. Check /var/log/failover.log on both controllers.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class VRRPStatesDoNotAgreeAlertClass(AlertClass):
@@ -46,11 +47,11 @@ class VRRPStatesDoNotAgreeAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'Controllers VRRP States Do Not Agree'
text = 'Controllers VRRP states do not agree: %(error)s.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class FailoverAlertSource(AlertSource):
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
failover_related = True
run_on_backup_node = False
diff --git a/src/middlewared/middlewared/alert/source/failover_disks.py b/src/middlewared/middlewared/alert/source/failover_disks.py
index 7f0ee17b0014e..926059ce33f8b 100644
--- a/src/middlewared/middlewared/alert/source/failover_disks.py
+++ b/src/middlewared/middlewared/alert/source/failover_disks.py
@@ -4,6 +4,7 @@
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
+from middlewared.utils import ProductType
TITLE = 'Disks Missing On '
TEXT = 'Disks with serial %(serials)s present on '
@@ -14,7 +15,7 @@ class DisksAreNotPresentOnStandbyNodeAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = TITLE + 'Standby Storage Controller'
text = TEXT + 'active storage controller but missing on standby storage controller.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class DisksAreNotPresentOnActiveNodeAlertClass(AlertClass):
@@ -22,11 +23,11 @@ class DisksAreNotPresentOnActiveNodeAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = TITLE + 'Active Storage Controller'
text = TEXT + 'standby storage controller but missing on active storage controller.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class FailoverDisksAlertSource(AlertSource):
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
failover_related = True
run_on_backup_node = False
diff --git a/src/middlewared/middlewared/alert/source/failover_interfaces.py b/src/middlewared/middlewared/alert/source/failover_interfaces.py
index 19d468236675a..403b20be50399 100644
--- a/src/middlewared/middlewared/alert/source/failover_interfaces.py
+++ b/src/middlewared/middlewared/alert/source/failover_interfaces.py
@@ -4,6 +4,7 @@
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, AlertSource, Alert
+from middlewared.utils import ProductType
class NoCriticalFailoverInterfaceFoundAlertClass(AlertClass):
@@ -11,11 +12,11 @@ class NoCriticalFailoverInterfaceFoundAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'At Least 1 Network Interface Is Required To Be Marked Critical For Failover'
text = 'At least 1 network interface is required to be marked critical for failover.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class FailoverCriticalAlertSource(AlertSource):
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
failover_related = True
run_on_backup_node = False
diff --git a/src/middlewared/middlewared/alert/source/failover_nics.py b/src/middlewared/middlewared/alert/source/failover_nics.py
index 6f562b575299e..4fa019666d4f1 100644
--- a/src/middlewared/middlewared/alert/source/failover_nics.py
+++ b/src/middlewared/middlewared/alert/source/failover_nics.py
@@ -4,6 +4,7 @@
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
+from middlewared.utils import ProductType
TITLE = 'Missing Network Interface On '
TEXT = 'Network interfaces %(interfaces)s present on '
@@ -14,7 +15,7 @@ class NetworkCardsMismatchOnStandbyNodeAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = TITLE + 'Standby Storage Controller'
text = TEXT + 'active storage controller but missing on standby storage controller.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class NetworkCardsMismatchOnActiveNodeAlertClass(AlertClass):
@@ -22,11 +23,11 @@ class NetworkCardsMismatchOnActiveNodeAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = TITLE + 'Active Storage Controller'
text = TEXT + 'standby storage controller but missing on active storage controller.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class FailoverNetworkCardsAlertSource(AlertSource):
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
failover_related = True
run_on_backup_node = False
diff --git a/src/middlewared/middlewared/alert/source/failover_remote_inaccessible.py b/src/middlewared/middlewared/alert/source/failover_remote_inaccessible.py
index 42a62ed2a956e..5b3e6a258eb0a 100644
--- a/src/middlewared/middlewared/alert/source/failover_remote_inaccessible.py
+++ b/src/middlewared/middlewared/alert/source/failover_remote_inaccessible.py
@@ -6,6 +6,7 @@
import time
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource, UnavailableException
+from middlewared.utils import ProductType
from middlewared.utils.crypto import generate_token
@@ -14,13 +15,13 @@ class FailoverRemoteSystemInaccessibleAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'Other Controller is Inaccessible'
text = 'Other TrueNAS controller is inaccessible. Contact support. Incident ID: %s.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
proactive_support = True
proactive_support_notify_gone = True
class FailoverRemoteSystemInaccessibleAlertSource(AlertSource):
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
failover_related = True
run_on_backup_node = False
diff --git a/src/middlewared/middlewared/alert/source/failover_sync.py b/src/middlewared/middlewared/alert/source/failover_sync.py
index d6aa27ac8688f..b5b146328ca76 100644
--- a/src/middlewared/middlewared/alert/source/failover_sync.py
+++ b/src/middlewared/middlewared/alert/source/failover_sync.py
@@ -6,6 +6,7 @@
from middlewared.alert.base import (
Alert, AlertClass, SimpleOneShotAlertClass, AlertCategory, AlertLevel, OneShotAlertClass
)
+from middlewared.utils import ProductType
class FailoverSyncFailedAlertClass(AlertClass, SimpleOneShotAlertClass):
@@ -17,7 +18,7 @@ class FailoverSyncFailedAlertClass(AlertClass, SimpleOneShotAlertClass):
"the standby storage controller but failed. Use Sync to Peer on the "
"System/Failover page to try and perform a manual sync."
)
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
async def create(self, args):
return Alert(FailoverSyncFailedAlertClass, {'mins': args['mins']})
@@ -36,7 +37,7 @@ class FailoverKeysSyncFailedAlertClass(AlertClass, SimpleOneShotAlertClass):
"The automatic synchronization of encryption passphrases with the standby "
"controller has failed. Please go to System > Failover and manually sync to peer."
)
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
class FailoverKMIPKeysSyncFailedAlertClass(AlertClass, OneShotAlertClass):
@@ -49,7 +50,7 @@ class FailoverKMIPKeysSyncFailedAlertClass(AlertClass, OneShotAlertClass):
"The automatic synchronization of KMIP keys with the standby "
"controller has failed due to %(error)s. Please go to System > Failover and manually sync to peer."
)
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
async def create(self, args):
return Alert(FailoverKMIPKeysSyncFailedAlertClass, args)
diff --git a/src/middlewared/middlewared/alert/source/jbof.py b/src/middlewared/middlewared/alert/source/jbof.py
index 2b97b7eae9999..1db97eafee0a9 100644
--- a/src/middlewared/middlewared/alert/source/jbof.py
+++ b/src/middlewared/middlewared/alert/source/jbof.py
@@ -8,6 +8,7 @@
from middlewared.alert.base import Alert, AlertCategory, AlertClass, AlertLevel, AlertSource, SimpleOneShotAlertClass
from middlewared.alert.schedule import IntervalSchedule
from middlewared.plugins.enclosure_.enums import ElementStatus, ElementType
+from middlewared.utils import ProductType
class JBOFTearDownFailureAlertClass(AlertClass, SimpleOneShotAlertClass):
@@ -25,7 +26,7 @@ class JBOFRedfishCommAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'Failed to Communicate with JBOF'
text = 'JBOF: "%(desc)s" (%(ip1)s/%(ip2)s) Failed to communicate with redfish interface.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class JBOFInvalidDataAlertClass(AlertClass):
@@ -33,7 +34,7 @@ class JBOFInvalidDataAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'JBOF has invalid data'
text = 'JBOF: "%(desc)s" (%(ip1)s/%(ip2)s) does not provide valid data for: %(keys)s'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class JBOFElementWarningAlertClass(AlertClass):
@@ -41,7 +42,7 @@ class JBOFElementWarningAlertClass(AlertClass):
level = AlertLevel.WARNING
title = 'JBOF element non-critical'
text = 'JBOF: "%(desc)s" (%(ip1)s/%(ip2)s) %(etype)s %(key)s is noncritical: %(value)s'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class JBOFElementCriticalAlertClass(AlertClass):
@@ -49,11 +50,11 @@ class JBOFElementCriticalAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'JBOF element critical'
text = 'JBOF: "%(desc)s" (%(ip1)s/%(ip2)s) %(etype)s %(key)s is critical: %(value)s'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class JBOFAlertSource(AlertSource):
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
run_on_backup_node = False
schedule = IntervalSchedule(datetime.timedelta(minutes=5))
diff --git a/src/middlewared/middlewared/alert/source/legacy_mini_bmc.py b/src/middlewared/middlewared/alert/source/legacy_mini_bmc.py
index d38bfa37b92bb..89b35a71e2258 100644
--- a/src/middlewared/middlewared/alert/source/legacy_mini_bmc.py
+++ b/src/middlewared/middlewared/alert/source/legacy_mini_bmc.py
@@ -1,4 +1,5 @@
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, AlertSource, Alert
+from middlewared.utils import ProductType
URL = "https://www.truenas.com/docs/hardware/legacyhardware/miniseries/freenas-minis-2nd-gen/freenasminibmcwatchdog/"
@@ -12,11 +13,11 @@ class TrueNASMiniBMCAlertClass(AlertClass):
f""
"ASRock Rack C2750D4I BMC Watchdog Issue for details."
)
- products = ("SCALE",)
+ products = (ProductType.SCALE,)
class TrueNASMiniBMCAlertSource(AlertSource):
- products = ("SCALE",)
+ products = (ProductType.SCALE,)
async def check(self):
dmi = await self.middleware.call("system.dmidecode_info")
diff --git a/src/middlewared/middlewared/alert/source/license_status.py b/src/middlewared/middlewared/alert/source/license_status.py
index 19bc305832a3d..0a4be86145fc6 100644
--- a/src/middlewared/middlewared/alert/source/license_status.py
+++ b/src/middlewared/middlewared/alert/source/license_status.py
@@ -9,6 +9,7 @@
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
from middlewared.alert.schedule import IntervalSchedule
+from middlewared.utils import ProductType
from middlewared.utils.license import LICENSE_ADDHW_MAPPING
@@ -17,7 +18,7 @@ class LicenseAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = "TrueNAS License Issue"
text = "%s"
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
class LicenseIsExpiringAlertClass(AlertClass):
@@ -25,7 +26,7 @@ class LicenseIsExpiringAlertClass(AlertClass):
level = AlertLevel.WARNING
title = "TrueNAS License Is Expiring"
text = "%s"
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
class LicenseHasExpiredAlertClass(AlertClass):
@@ -33,11 +34,11 @@ class LicenseHasExpiredAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = "TrueNAS License Has Expired"
text = "%s"
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
class LicenseStatusAlertSource(ThreadedAlertSource):
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
run_on_backup_node = False
schedule = IntervalSchedule(timedelta(hours=24))
diff --git a/src/middlewared/middlewared/alert/source/memory_errors.py b/src/middlewared/middlewared/alert/source/memory_errors.py
index af62a60c611d4..2a4c21b5e2268 100644
--- a/src/middlewared/middlewared/alert/source/memory_errors.py
+++ b/src/middlewared/middlewared/alert/source/memory_errors.py
@@ -5,6 +5,7 @@
from middlewared.alert.base import Alert, AlertCategory, AlertClass, AlertLevel, AlertSource
from middlewared.alert.schedule import CrontabSchedule
+from middlewared.utils import ProductType
from middlewared.utils.size import format_size
@@ -13,7 +14,7 @@ class MemoryErrorsAlertClass(AlertClass):
level = AlertLevel.WARNING
title = 'Uncorrected Memory Errors Detected'
text = '%(count)d total uncorrected errors detected for %(loc)s.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
proactive_support = True
@@ -22,7 +23,7 @@ class MemorySizeMismatchAlertClass(AlertClass):
level = AlertLevel.WARNING
title = 'Memory Size Mismatch Detected'
text = 'Memory size on this controller %(r1)s doesn\'t match other controller %(r2)s'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
proactive_support = True
diff --git a/src/middlewared/middlewared/alert/source/mseries_nvdimm_and_bios.py b/src/middlewared/middlewared/alert/source/mseries_nvdimm_and_bios.py
index bf1fce02625a7..34b12425c88a0 100644
--- a/src/middlewared/middlewared/alert/source/mseries_nvdimm_and_bios.py
+++ b/src/middlewared/middlewared/alert/source/mseries_nvdimm_and_bios.py
@@ -7,6 +7,7 @@
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
from middlewared.alert.schedule import IntervalSchedule
+from middlewared.utils import ProductType
WEBUI_SUPPORT_FORM = (
'Please contact iXsystems Support using the "File Ticket" button in the System Settings->General->Support form'
@@ -18,7 +19,7 @@ class NVDIMMAlertClass(AlertClass):
level = AlertLevel.WARNING
title = 'There Is An Issue With NVDIMM'
text = 'NVDIMM: "%(dev)s" is reporting "%(value)s" with status "%(status)s".'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class NVDIMMESLifetimeWarningAlertClass(AlertClass):
@@ -26,7 +27,7 @@ class NVDIMMESLifetimeWarningAlertClass(AlertClass):
level = AlertLevel.WARNING
title = 'NVDIMM Energy Source Lifetime Is Less Than 20%'
text = 'NVDIMM Energy Source Remaining Lifetime for %(dev)s is %(value)d%%.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class NVDIMMESLifetimeCriticalAlertClass(AlertClass):
@@ -34,7 +35,7 @@ class NVDIMMESLifetimeCriticalAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'NVDIMM Energy Source Lifetime Is Less Than 10%'
text = 'NVDIMM Energy Source Remaining Lifetime for %(dev)s is %(value)d%%.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class NVDIMMMemoryModLifetimeWarningAlertClass(AlertClass):
@@ -42,7 +43,7 @@ class NVDIMMMemoryModLifetimeWarningAlertClass(AlertClass):
level = AlertLevel.WARNING
title = 'NVDIMM Memory Module Lifetime Is Less Than 20%'
text = 'NVDIMM Memory Module Remaining Lifetime for %(dev)s is %(value)d%%.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class NVDIMMMemoryModLifetimeCriticalAlertClass(AlertClass):
@@ -50,7 +51,7 @@ class NVDIMMMemoryModLifetimeCriticalAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'NVDIMM Memory Module Lifetime Is Less Than 10%'
text = 'NVDIMM Memory Module Remaining Lifetime for %(dev)s is %(value)d%%.'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
class NVDIMMInvalidFirmwareVersionAlertClass(AlertClass):
@@ -58,7 +59,7 @@ class NVDIMMInvalidFirmwareVersionAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = 'Invalid NVDIMM Firmware Version'
text = f'NVDIMM: "%(dev)s" is running invalid firmware. {WEBUI_SUPPORT_FORM}'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
proactive_support = True
@@ -70,7 +71,7 @@ class NVDIMMRecommendedFirmwareVersionAlertClass(AlertClass):
'NVDIMM: "%(dev)s" is running firmware version "%(rv)s" which can be upgraded to '
f'"%(uv)s". {WEBUI_SUPPORT_FORM}'
)
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
proactive_support = True
@@ -79,13 +80,13 @@ class OldBiosVersionAlertClass(AlertClass):
level = AlertLevel.WARNING
title = 'Old BIOS Version'
text = f'This system is running an old BIOS version. {WEBUI_SUPPORT_FORM}'
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
proactive_support = True
class NVDIMMAndBIOSAlertSource(ThreadedAlertSource):
schedule = IntervalSchedule(datetime.timedelta(minutes=5))
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
def produce_alerts(self, nvdimm, alerts, old_bios):
persistency_restored = 0x4
diff --git a/src/middlewared/middlewared/alert/source/proactive_support.py b/src/middlewared/middlewared/alert/source/proactive_support.py
index 6d2e8fc56741c..20d938edda157 100644
--- a/src/middlewared/middlewared/alert/source/proactive_support.py
+++ b/src/middlewared/middlewared/alert/source/proactive_support.py
@@ -4,6 +4,7 @@
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource
+from middlewared.utils import ProductType
class ProactiveSupportAlertClass(AlertClass):
@@ -11,11 +12,11 @@ class ProactiveSupportAlertClass(AlertClass):
level = AlertLevel.WARNING
title = "Proactive Support Is Not Configured"
text = "%s"
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
class ProactiveSupportAlertSource(AlertSource):
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
run_on_backup_node = False
async def check(self):
diff --git a/src/middlewared/middlewared/alert/source/sata_dom_wear.py b/src/middlewared/middlewared/alert/source/sata_dom_wear.py
index f83b05d919389..e824b1ba8ac2a 100644
--- a/src/middlewared/middlewared/alert/source/sata_dom_wear.py
+++ b/src/middlewared/middlewared/alert/source/sata_dom_wear.py
@@ -6,6 +6,7 @@
from datetime import timedelta
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, AlertSource, IntervalSchedule
+from middlewared.utils import ProductType
class SATADOMWearWarningAlertClass(AlertClass):
@@ -13,7 +14,7 @@ class SATADOMWearWarningAlertClass(AlertClass):
level = AlertLevel.WARNING
title = "SATA DOM Lifetime: Less Than 20% Left"
text = "%(lifetime)d%% of lifetime left on SATA DOM %(disk)s."
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
class SATADOMWearCriticalAlertClass(AlertClass):
@@ -21,12 +22,12 @@ class SATADOMWearCriticalAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = "SATA DOM Lifetime: Less Than 10% Left"
text = "%(lifetime)d%% of lifetime left on SATA DOM %(disk)s."
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
class SATADOMWearAlertSource(AlertSource):
schedule = IntervalSchedule(timedelta(hours=1))
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
async def check(self):
dmi = await self.middleware.call("system.dmidecode_info")
diff --git a/src/middlewared/middlewared/alert/source/sensors.py b/src/middlewared/middlewared/alert/source/sensors.py
index 12a4f88392a95..16f0e98faf21b 100644
--- a/src/middlewared/middlewared/alert/source/sensors.py
+++ b/src/middlewared/middlewared/alert/source/sensors.py
@@ -4,6 +4,7 @@
# See the file LICENSE.IX for complete terms and conditions
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, AlertSource, Alert
+from middlewared.utils import ProductType
class SensorAlertClass(AlertClass):
@@ -11,7 +12,7 @@ class SensorAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = "Sensor Value Is Outside of Working Range"
text = "Sensor %(name)s is %(relative)s %(level)s value: %(value)s %(event)s"
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
class PowerSupplyAlertClass(AlertClass):
@@ -19,7 +20,7 @@ class PowerSupplyAlertClass(AlertClass):
level = AlertLevel.CRITICAL
title = "Power Supply Error"
text = "%(psu)s is %(state)s showing: %(errors)s"
- products = ("SCALE_ENTERPRISE",)
+ products = (ProductType.SCALE_ENTERPRISE,)
class SensorsAlertSource(AlertSource):
diff --git a/src/middlewared/middlewared/alert/source/usb_storage.py b/src/middlewared/middlewared/alert/source/usb_storage.py
index 4ebdf0a62f045..27f86d47514b7 100644
--- a/src/middlewared/middlewared/alert/source/usb_storage.py
+++ b/src/middlewared/middlewared/alert/source/usb_storage.py
@@ -6,6 +6,7 @@
from pathlib import Path
from middlewared.alert.base import AlertClass, AlertCategory, AlertLevel, Alert, ThreadedAlertSource
+from middlewared.utils import ProductType
class USBStorageAlertClass(AlertClass):
@@ -14,12 +15,12 @@ class USBStorageAlertClass(AlertClass):
title = 'A USB Storage Device Has Been Connected to This System'
text = ('A USB storage device %r has been connected to this system. Please remove that USB device to '
'prevent problems with system boot or HA failover.')
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
proactive_support = True
class USBStorageAlertSource(ThreadedAlertSource):
- products = ('SCALE_ENTERPRISE',)
+ products = (ProductType.SCALE_ENTERPRISE,)
def check_sync(self):
alerts = []
diff --git a/src/middlewared/middlewared/api/v25_04_0/__init__.py b/src/middlewared/middlewared/api/v25_04_0/__init__.py
index 20bbe0e865df2..0d9d33f5960e9 100644
--- a/src/middlewared/middlewared/api/v25_04_0/__init__.py
+++ b/src/middlewared/middlewared/api/v25_04_0/__init__.py
@@ -1,13 +1,18 @@
+from .acme_protocol import * # noqa
+from .alert import * # noqa
+from .alertservice import * # noqa
from .api_key import * # noqa
from .auth import * # noqa
from .cloud_sync import * # noqa
from .common import * # noqa
from .core import * # noqa
+from .disk import * # noqa
from .failover_reboot import * # noqa
from .group import * # noqa
from .keychain import * # noqa
from .privilege import * # noqa
+from .smartctl import * # noqa
from .system_reboot import * # noqa
+from .system_lifecycle import * # noqa
from .user import * # noqa
from .vendor import * # noqa
-from .smartctl import * # noqa
diff --git a/src/middlewared/middlewared/api/v25_04_0/acme_protocol.py b/src/middlewared/middlewared/api/v25_04_0/acme_protocol.py
new file mode 100644
index 0000000000000..222121dd44269
--- /dev/null
+++ b/src/middlewared/middlewared/api/v25_04_0/acme_protocol.py
@@ -0,0 +1,103 @@
+from pydantic import Field, Secret
+
+from middlewared.api.base import BaseModel, Excluded, excluded_field, single_argument_args, ForUpdateMetaclass
+
+
+__all__ = [
+ 'ACMERegistrationCreateArgs', 'ACMERegistrationCreateResult', 'DNSAuthenticatorUpdateArgs',
+ 'DNSAuthenticatorUpdateResult', 'DNSAuthenticatorCreateArgs', 'DNSAuthenticatorCreateResult',
+ 'DNSAuthenticatorDeleteArgs', 'DNSAuthenticatorDeleteResult', 'DNSAuthenticatorSchemasArgs',
+ 'DNSAuthenticatorSchemasResult', 'ACMERegistrationEntry', 'ACMEDNSAuthenticatorEntry',
+]
+
+
+class JWKCreate(BaseModel):
+ key_size: int = 2048
+ public_exponent: int = 65537
+
+
+class ACMERegistrationEntry(BaseModel):
+ id: int
+ uri: str
+ directory: str
+ tos: str
+ new_account_uri: str
+ new_nonce_uri: str
+ new_order_uri: str
+ revoke_cert_uri: str
+
+
+class ACMEDNSAuthenticatorEntry(BaseModel):
+ id: int
+ authenticator: str
+ attributes: Secret[dict]
+ name: str
+
+
+class DNSAuthenticatorCreate(ACMEDNSAuthenticatorEntry):
+ id: Excluded = excluded_field()
+
+
+class DNSAuthenticatorUpdate(DNSAuthenticatorCreate, metaclass=ForUpdateMetaclass):
+ authenticator: Excluded = excluded_field()
+
+
+class DNSAuthenticatorAttributeSchema(BaseModel):
+ _name_: str
+ title: str
+ _required_: bool
+
+
+class DNSAuthenticatorSchemaEntry(BaseModel):
+ key: str
+ schema_: list[DNSAuthenticatorAttributeSchema] = Field(..., alias='schema')
+
+
+################### Arguments ###################
+
+
+@single_argument_args('acme_registration_create')
+class ACMERegistrationCreateArgs(BaseModel):
+ tos: bool = False
+ jwk_create: JWKCreate = Field(default=JWKCreate())
+ acme_directory_uri: str
+
+
+class DNSAuthenticatorCreateArgs(BaseModel):
+ dns_authenticator_create: DNSAuthenticatorCreate
+
+
+class DNSAuthenticatorUpdateArgs(BaseModel):
+ id: int
+ dns_authenticator_update: DNSAuthenticatorUpdate
+
+
+class DNSAuthenticatorDeleteArgs(BaseModel):
+ id: int
+
+
+class DNSAuthenticatorSchemasArgs(BaseModel):
+ pass
+
+
+################### Returns ###################
+
+
+class ACMERegistrationCreateResult(BaseModel):
+ result: ACMERegistrationEntry
+
+
+class DNSAuthenticatorCreateResult(BaseModel):
+ result: ACMEDNSAuthenticatorEntry
+
+
+class DNSAuthenticatorUpdateResult(BaseModel):
+ result: ACMEDNSAuthenticatorEntry
+
+
+class DNSAuthenticatorDeleteResult(BaseModel):
+ result: bool
+
+
+class DNSAuthenticatorSchemasResult(BaseModel):
+ result: list[DNSAuthenticatorSchemaEntry]
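One wrinkle in the new models: `schema` collides with pydantic's own namespace, so DNSAuthenticatorSchemaEntry stores the field as `schema_` and maps it through an alias. A standalone sketch of the same trick with plain pydantic (the real models build on middlewared.api.base):

```python
from pydantic import BaseModel, Field

class SchemaEntry(BaseModel):
    key: str
    schema_: list[str] = Field(..., alias='schema')

# Callers populate and read the field under its public name 'schema'.
entry = SchemaEntry.model_validate({'key': 'cloudflare', 'schema': ['api_token']})
print(entry.model_dump(by_alias=True))  # {'key': 'cloudflare', 'schema': ['api_token']}
```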
diff --git a/src/middlewared/middlewared/api/v25_04_0/alert.py b/src/middlewared/middlewared/api/v25_04_0/alert.py
new file mode 100644
index 0000000000000..efaa81dd7e761
--- /dev/null
+++ b/src/middlewared/middlewared/api/v25_04_0/alert.py
@@ -0,0 +1,119 @@
+from typing import Any
+
+from pydantic import Field
+
+from middlewared.api.base import BaseModel, LongString
+
+
+__all__ = [
+ 'AlertDismissArgs', 'AlertListArgs', 'AlertDismissResult', 'AlertListResult', 'AlertListCategoriesArgs',
+ 'AlertListCategoriesResult', 'AlertListPoliciesArgs', 'AlertListPoliciesResult', 'AlertRestoreArgs',
+ 'AlertRestoreResult', 'AlertOneshotCreateArgs', 'AlertOneshotCreateResult', 'AlertOneshotDeleteArgs',
+ 'AlertOneshotDeleteResult', 'AlertClassesEntry', 'AlertClassesUpdateArgs', 'AlertClassesUpdateResult', 'Alert',
+]
+
+
+class Alert(BaseModel):
+ uuid: str
+ source: str
+ klass: str
+ args: Any
+ node: str
+ key: LongString
+ datetime: str
+ last_occurrence: str
+ dismissed: bool
+ mail: Any
+ text: LongString
+ id: str
+ level: str
+ formatted: LongString | None
+ one_shot: bool
+
+
+class AlertCategoryClass(BaseModel):
+ id: str
+ title: str
+ level: str
+ proactive_support: bool
+
+
+class AlertCategory(BaseModel):
+ id: str
+ title: str
+ classes: list[AlertCategoryClass]
+
+
+class AlertClassesUpdate(BaseModel):
+ classes: dict = {}
+
+
+class AlertClassesEntry(AlertClassesUpdate):
+ id: int
+
+
+class AlertDismissArgs(BaseModel):
+ uuid: str
+
+
+class AlertDismissResult(BaseModel):
+ result: None
+
+
+class AlertListArgs(BaseModel):
+ pass
+
+
+class AlertListResult(BaseModel):
+ result: list[Alert]
+
+
+class AlertListCategoriesArgs(BaseModel):
+ pass
+
+
+class AlertListCategoriesResult(BaseModel):
+ result: list[AlertCategory]
+
+
+
+class AlertListPoliciesArgs(BaseModel):
+ pass
+
+
+class AlertListPoliciesResult(BaseModel):
+ result: list[str]
+
+
+class AlertOneshotCreateArgs(BaseModel):
+ klass: str
+ args: Any
+
+
+class AlertOneshotCreateResult(BaseModel):
+ result: None
+
+
+class AlertOneshotDeleteArgs(BaseModel):
+ klass: str | list[str]
+ query: Any = None
+
+
+class AlertOneshotDeleteResult(BaseModel):
+ result: None
+
+
+class AlertRestoreArgs(BaseModel):
+ uuid: str
+
+
+class AlertRestoreResult(BaseModel):
+ result: None
+
+
+class AlertClassesUpdateArgs(BaseModel):
+ alertclasses_update: AlertClassesUpdate = Field(default=AlertClassesUpdate())
+
+
+class AlertClassesUpdateResult(BaseModel):
+ result: AlertClassesEntry
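`classes: dict = {}` would be a shared-mutable-default bug in a plain class, but pydantic copies field defaults per instance, which is why both this model and the `Field(default=AlertClassesUpdate())` pattern above are safe. A quick demonstration (plain pydantic assumed):

```python
from pydantic import BaseModel

class AlertClassesUpdate(BaseModel):
    classes: dict = {}

a, b = AlertClassesUpdate(), AlertClassesUpdate()
a.classes['HasUpdate'] = {'policy': 'NEVER'}
assert b.classes == {}  # defaults are copied per instance, never shared
```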
diff --git a/src/middlewared/middlewared/api/v25_04_0/alertservice.py b/src/middlewared/middlewared/api/v25_04_0/alertservice.py
new file mode 100644
index 0000000000000..3d9195e24b855
--- /dev/null
+++ b/src/middlewared/middlewared/api/v25_04_0/alertservice.py
@@ -0,0 +1,60 @@
+from middlewared.api.base import BaseModel, NonEmptyString
+
+
+__all__ = [
+ 'AlertServiceEntry', 'AlertServiceCreateArgs', 'AlertServiceUpdateArgs', 'AlertServiceDeleteArgs',
+ 'AlertServiceTestArgs', 'AlertServiceCreateResult', 'AlertServiceUpdateResult', 'AlertServiceDeleteResult',
+ 'AlertServiceTestResult',
+]
+
+
+class AlertServiceCreate(BaseModel):
+ name: NonEmptyString
+ type: str
+ attributes: dict
+ level: str
+ enabled: bool = True
+
+
+class AlertServiceEntry(AlertServiceCreate):
+ id: int
+ type__title: str
+
+
+########### Arguments ###########
+
+
+class AlertServiceCreateArgs(BaseModel):
+ alert_service_create: AlertServiceCreate
+
+
+class AlertServiceUpdateArgs(BaseModel):
+ id: int
+ alert_service_update: AlertServiceCreate
+
+
+class AlertServiceDeleteArgs(BaseModel):
+ id: int
+
+
+class AlertServiceTestArgs(BaseModel):
+ alert_service_create: AlertServiceCreate
+
+
+########### Returns ###########
+
+
+class AlertServiceCreateResult(BaseModel):
+ result: AlertServiceEntry
+
+
+class AlertServiceUpdateResult(BaseModel):
+ result: AlertServiceEntry
+
+
+class AlertServiceDeleteResult(BaseModel):
+ result: bool
+
+
+class AlertServiceTestResult(BaseModel):
+ result: bool
diff --git a/src/middlewared/middlewared/api/v25_04_0/disk.py b/src/middlewared/middlewared/api/v25_04_0/disk.py
new file mode 100644
index 0000000000000..0b33148e93bf9
--- /dev/null
+++ b/src/middlewared/middlewared/api/v25_04_0/disk.py
@@ -0,0 +1,10 @@
+from middlewared.api.base import BaseModel
+from .alert import Alert
+
+
+class DiskTemperatureAlertsArgs(BaseModel):
+ names: list[str]
+
+
+class DiskTemperatureAlertsResult(BaseModel):
+ result: list[Alert]
diff --git a/src/middlewared/middlewared/api/v25_04_0/smartctl.py b/src/middlewared/middlewared/api/v25_04_0/smartctl.py
index 3fe99344846bb..16b4c8a3f3c41 100644
--- a/src/middlewared/middlewared/api/v25_04_0/smartctl.py
+++ b/src/middlewared/middlewared/api/v25_04_0/smartctl.py
@@ -1,5 +1,3 @@
-from typing import Any
-
from middlewared.api.base import BaseModel
__all__ = ["AtaSelfTest", "NvmeSelfTest", "ScsiSelfTest"]
diff --git a/src/middlewared/middlewared/api/v25_04_0/system_lifecycle.py b/src/middlewared/middlewared/api/v25_04_0/system_lifecycle.py
new file mode 100644
index 0000000000000..e85e7c9ee0f19
--- /dev/null
+++ b/src/middlewared/middlewared/api/v25_04_0/system_lifecycle.py
@@ -0,0 +1,32 @@
+from pydantic import Field
+
+from middlewared.api.base import BaseModel, NonEmptyString
+
+__all__ = ["SystemRebootArgs", "SystemRebootResult",
+ "SystemShutdownArgs", "SystemShutdownResult"]
+
+
+class SystemRebootOptions(BaseModel):
+ delay: int | None = None
+
+
+class SystemRebootArgs(BaseModel):
+ reason: NonEmptyString
+ options: SystemRebootOptions = Field(default=SystemRebootOptions())
+
+
+class SystemRebootResult(BaseModel):
+ result: None
+
+
+class SystemShutdownOptions(BaseModel):
+ delay: int | None = None
+
+
+class SystemShutdownArgs(BaseModel):
+ reason: NonEmptyString
+ options: SystemShutdownOptions = Field(default=SystemShutdownOptions())
+
+
+class SystemShutdownResult(BaseModel):
+ result: None
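A convention worth noting in these versioned models: void middleware calls get an explicit `result: None`, so a non-null return fails validation instead of slipping through. A hedged sketch with a plain pydantic stand-in:

```python
from pydantic import BaseModel, ValidationError

class SystemRebootResult(BaseModel):
    result: None

SystemRebootResult(result=None)  # accepted
try:
    SystemRebootResult(result=True)
except ValidationError as e:
    print('non-null result rejected:', e.error_count(), 'error')
```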
diff --git a/src/middlewared/middlewared/common/smart/smartctl.py b/src/middlewared/middlewared/common/smart/smartctl.py
index c5b3bed0c3e18..3d5345a7b8099 100644
--- a/src/middlewared/middlewared/common/smart/smartctl.py
+++ b/src/middlewared/middlewared/common/smart/smartctl.py
@@ -1,6 +1,7 @@
+from collections import namedtuple
import logging
+import os
import shlex
-from collections import namedtuple
from middlewared.utils import run
@@ -8,7 +9,7 @@
logger = logging.getLogger(__name__)
SMARTCTL_POWERMODES = ['NEVER', 'SLEEP', 'STANDBY', 'IDLE']
-SMARTCTX = namedtuple('smartctl_args', ['devices', 'enterprise_hardware'])
+SMARTCTX = namedtuple('smartctl_args', ['devices', 'enterprise_hardware', 'middleware'])
async def get_smartctl_args(context, disk, smartoptions):
@@ -31,7 +32,15 @@ async def get_smartctl_args(context, disk, smartoptions):
return [f"/dev/{disk}", "-d", "nvme"] + smartoptions
args = [f"/dev/{disk}"] + smartoptions
- if not enterprise_hardware and device['bus'] == 'USB':
+
+ sat = False
+ if enterprise_hardware:
+ if await context.middleware.run_in_thread(os.path.exists, f"/sys/block/{disk}/device/vpd_pg89"):
+ sat = True
+ else:
+ if device['bus'] == 'USB':
+ sat = True
+ if sat:
args = args + ["-d", "sat"]
return args
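The rewritten branch changes when `-d sat` gets appended: enterprise hardware now keys off the presence of VPD page 0x89 (SCSI/ATA Translation) in sysfs instead of assuming direct ATA attachment, while other systems keep the old USB-bridge heuristic. Condensed into one function (a sketch; the sysfs path mirrors the diff, not every kernel layout verified):

```python
import os

def needs_sat(disk: str, bus: str, enterprise_hardware: bool) -> bool:
    """SAT passthrough is needed when an ATA disk sits behind a translation
    layer (SAS expander/HBA on enterprise gear, USB bridge elsewhere)."""
    if enterprise_hardware:
        # vpd_pg89 is only exported for devices speaking SCSI/ATA Translation
        return os.path.exists(f'/sys/block/{disk}/device/vpd_pg89')
    return bus == 'USB'

# e.g. needs_sat('sda', 'USB', False) -> True on a USB-to-SATA bridge
```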
diff --git a/src/middlewared/middlewared/etc_files/smartd.py b/src/middlewared/middlewared/etc_files/smartd.py
index e35082042130b..c2470ae50e39c 100644
--- a/src/middlewared/middlewared/etc_files/smartd.py
+++ b/src/middlewared/middlewared/etc_files/smartd.py
@@ -93,7 +93,7 @@ async def render(service, middleware):
devices = await middleware.call("device.get_disks")
hardware = await middleware.call("truenas.is_ix_hardware")
- context = SMARTCTX(devices=devices, enterprise_hardware=hardware)
+ context = SMARTCTX(devices=devices, enterprise_hardware=hardware, middleware=middleware)
annotated = dict(filter(None, await asyncio_map(
lambda disk: annotate_disk_for_smart(context, disk["disk_name"], disk["disk_smartoptions"]),
[disk for disk in disks if disk["disk_name"] is not None],
diff --git a/src/middlewared/middlewared/event.py b/src/middlewared/middlewared/event.py
index d45c434ec415a..c8abb9865a2c1 100644
--- a/src/middlewared/middlewared/event.py
+++ b/src/middlewared/middlewared/event.py
@@ -6,7 +6,6 @@
from middlewared.role import RoleManager
from middlewared.schema import Any, clean_and_validate_arg, ValidationErrors
-from middlewared.settings import conf
class Events:
@@ -80,15 +79,6 @@ def __init__(self, middleware, name, arg, send_event, unsubscribe_all):
self._cancel_sync = threading.Event()
def send_event(self, event_type: str, **kwargs):
- if conf.debug_mode and event_type in ('ADDED', 'CHANGED'):
- verrors = ValidationErrors()
- clean_and_validate_arg(verrors, self.RETURNS[0], kwargs.get('fields'))
- if verrors:
- self.middleware.loop.call_soon_threadsafe(
- lambda: self.middleware.create_task(self.unsubscribe_all(verrors))
- )
- return
-
self.send_event_internal(event_type, **kwargs)
async def validate_arg(self):
diff --git a/src/middlewared/middlewared/main.py b/src/middlewared/middlewared/main.py
index 4f45b785ae09d..e8938d22a936f 100644
--- a/src/middlewared/middlewared/main.py
+++ b/src/middlewared/middlewared/main.py
@@ -13,8 +13,7 @@
from .pipe import Pipes, Pipe
from .restful import parse_credentials, authenticate, create_application, copy_multipart_to_pipe, RESTfulAPI
from .role import ROLES, RoleManager
-from .settings import conf
-from .schema import clean_and_validate_arg, Error as SchemaError, OROperator
+from .schema import Error as SchemaError, OROperator
import middlewared.service
from .service_exception import (
adapt_exception, CallError, CallException, ErrnoMixin, MatchNotFound, ValidationError, ValidationErrors,
@@ -1741,14 +1740,6 @@ def send_event(self, name, event_type: str, **kwargs):
assert event_type in ('ADDED', 'CHANGED', 'REMOVED')
- event_data = self.events.get_event(name)
- # TODO: Temporarily skip events which are CHANGED but have cleared set for validation as CHANGED
- # will be removed in next release and this case wouldn't be applicable
- if event_data and conf.debug_mode and event_type in ('ADDED', 'CHANGED'):
- verrors = ValidationErrors()
- clean_and_validate_arg(verrors, event_data['returns'][0], kwargs.get('fields'))
- verrors.check()
-
self.logger.trace(f'Sending event {name!r}:{event_type!r}:{kwargs!r}')
for session_id, wsclient in list(self.__wsclients.items()):
@@ -2186,7 +2177,6 @@ def main():
parser.add_argument('--loop-debug', action='store_true')
parser.add_argument('--trace-malloc', '-tm', action='store', nargs=2, type=int, default=False)
parser.add_argument('--overlay-dirs', '-o', action='append')
- parser.add_argument('--debug-mode', action='store_true', default=False)
parser.add_argument('--debug-level', choices=[
'TRACE',
'DEBUG',
@@ -2221,8 +2211,6 @@ def main():
with open(pidpath, "w") as _pidfile:
_pidfile.write(f"{str(os.getpid())}\n")
- conf.debug_mode = args.debug_mode
-
Middleware(
loop_debug=args.loop_debug,
loop_monitor=not args.disable_loop_monitor,
diff --git a/src/middlewared/middlewared/plugins/account_/2fa.py b/src/middlewared/middlewared/plugins/account_/2fa.py
index efc4b296a3c9e..5d0563f36089c 100644
--- a/src/middlewared/middlewared/plugins/account_/2fa.py
+++ b/src/middlewared/middlewared/plugins/account_/2fa.py
@@ -4,7 +4,7 @@
from middlewared.api import api_method
from middlewared.api.current import *
from middlewared.service import CallError, no_authz_required, pass_app, private, Service
-from middlewared.plugins.system.product import PRODUCT_NAME
+from middlewared.utils import ProductName
from middlewared.utils.privilege import app_credential_full_admin_or_user
@@ -17,7 +17,7 @@ async def provisioning_uri_internal(self, username, user_twofactor_config):
digits=user_twofactor_config['otp_digits'],
).provisioning_uri(
f'{username}-{await self.middleware.call("system.hostname")}'
- f'@{PRODUCT_NAME}',
+ f'@{ProductName.PRODUCT_NAME}',
'iXsystems'
)
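For reference, the provisioning URI built here is standard pyotp; only the label format (`user-hostname@PRODUCT_NAME`) and the hardcoded 'iXsystems' issuer come from the plugin. A minimal reproduction outside the middleware:

```python
import pyotp

secret = pyotp.random_base32()
uri = pyotp.TOTP(secret, digits=6).provisioning_uri(
    'admin-truenas@TrueNAS',  # stands in for f'{username}-{hostname}@{ProductName.PRODUCT_NAME}'
    'iXsystems',
)
print(uri)  # otpauth://totp/iXsystems:admin-truenas%40TrueNAS?secret=...&issuer=iXsystems
```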
diff --git a/src/middlewared/middlewared/plugins/acme_protocol.py b/src/middlewared/middlewared/plugins/acme_protocol.py
index 72dd9a173e788..d29bae8e750a7 100644
--- a/src/middlewared/middlewared/plugins/acme_protocol.py
+++ b/src/middlewared/middlewared/plugins/acme_protocol.py
@@ -2,9 +2,14 @@
import json
import requests
-from middlewared.plugins.acme_protocol_.authenticators.factory import auth_factory
-from middlewared.schema import Bool, Dict, Int, Patch, Str, ValidationErrors
-from middlewared.service import accepts, CallError, CRUDService, private
+from middlewared.api import api_method
+from middlewared.api.current import (
+ ACMERegistrationCreateArgs, ACMERegistrationCreateResult, DNSAuthenticatorUpdateArgs, DNSAuthenticatorUpdateResult,
+ DNSAuthenticatorCreateArgs, DNSAuthenticatorCreateResult, DNSAuthenticatorDeleteArgs, DNSAuthenticatorDeleteResult,
+ ACMERegistrationEntry, ACMEDNSAuthenticatorEntry,
+)
+from middlewared.schema import ValidationErrors
+from middlewared.service import CallError, CRUDService, private
import middlewared.sqlalchemy as sa
from acme import client, messages
@@ -45,6 +50,7 @@ class Config:
datastore_extend = 'acme.registration.register_extend'
namespace = 'acme.registration'
private = True
+ entry = ACMERegistrationEntry
@private
async def register_extend(self, data):
@@ -70,18 +76,7 @@ def get_directory(self, acme_directory_uri):
except (requests.ConnectionError, requests.Timeout, json.JSONDecodeError, KeyError) as e:
raise CallError(f'Unable to retrieve directory : {e}')
- @accepts(
- Dict(
- 'acme_registration_create',
- Bool('tos', default=False),
- Dict(
- 'JWK_create',
- Int('key_size', default=2048),
- Int('public_exponent', default=65537)
- ),
- Str('acme_directory_uri', required=True),
- )
- )
+ @api_method(ACMERegistrationCreateArgs, ACMERegistrationCreateResult)
def do_create(self, data):
"""
Register with ACME Server
@@ -210,22 +205,7 @@ class Config:
namespace = 'acme.dns.authenticator'
datastore = 'system.acmednsauthenticator'
cli_namespace = 'system.acme.dns_auth'
-
- ENTRY = Dict(
- 'acme_dns_authenticator_entry',
- Int('id', required=True),
- Str(
- 'authenticator', enum=[authenticator for authenticator in auth_factory.get_authenticators()],
- required=True
- ),
- Dict(
- 'attributes',
- additional_attrs=True,
- description='Specific attributes of each `authenticator`',
- private=True,
- ),
- Str('name', description='User defined name of authenticator', required=True),
- )
+ entry = ACMEDNSAuthenticatorEntry
@private
async def common_validation(self, data, schema_name, old=None):
@@ -246,6 +226,7 @@ async def common_validation(self, data, schema_name, old=None):
verrors.check()
+ @api_method(DNSAuthenticatorCreateArgs, DNSAuthenticatorCreateResult)
async def do_create(self, data):
"""
Create a DNS Authenticator
@@ -282,16 +263,7 @@ async def do_create(self, data):
return await self.get_instance(id_)
- @accepts(
- Int('id'),
- Patch(
- 'acme_dns_authenticator_entry',
- 'dns_authenticator_update',
- ('rm', {'name': 'id'}),
- ('rm', {'name': 'authenticator'}),
- ('attr', {'update': True}),
- ),
- )
+ @api_method(DNSAuthenticatorUpdateArgs, DNSAuthenticatorUpdateResult)
async def do_update(self, id_, data):
"""
Update DNS Authenticator of `id`
@@ -332,6 +304,7 @@ async def do_update(self, id_, data):
return await self.get_instance(id_)
+ @api_method(DNSAuthenticatorDeleteArgs, DNSAuthenticatorDeleteResult)
async def do_delete(self, id_):
"""
Delete DNS Authenticator of `id`
diff --git a/src/middlewared/middlewared/plugins/acme_protocol_/schema.py b/src/middlewared/middlewared/plugins/acme_protocol_/schema.py
index 153a31a216611..9f9015071c168 100644
--- a/src/middlewared/middlewared/plugins/acme_protocol_/schema.py
+++ b/src/middlewared/middlewared/plugins/acme_protocol_/schema.py
@@ -1,5 +1,6 @@
-from middlewared.schema import Bool, Dict, Str, List, returns
-from middlewared.service import accepts, private, Service
+from middlewared.api import api_method
+from middlewared.api.current import DNSAuthenticatorSchemasArgs, DNSAuthenticatorSchemasResult
+from middlewared.service import private, Service
from .authenticators.factory import auth_factory
@@ -13,26 +14,7 @@ def __init__(self, *args, **kwargs):
super(DNSAuthenticatorService, self).__init__(*args, **kwargs)
self.schemas = self.get_authenticator_schemas()
- @accepts(roles=['READONLY_ADMIN'])
- @returns(List(
- title='Authenticator Schemas',
- items=[Dict(
- 'schema_entry',
- Str('key', required=True),
- List(
- 'schema',
- items=[Dict(
- 'attribute_schema',
- Str('_name_', required=True),
- Str('title', required=True),
- Bool('_required_', required=True),
- additional_attrs=True,
- title='Attribute Schema',
- )],
- ),
- title='Authenticator Schema'
- )],
- ))
+ @api_method(DNSAuthenticatorSchemasArgs, DNSAuthenticatorSchemasResult, roles=['READONLY_ADMIN'])
def authenticator_schemas(self):
"""
Get the schemas for all DNS providers we support for ACME DNS Challenge and the respective attributes
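The recurring pattern in this change set replaces inline @accepts/@returns schema trees with named argument/result models referenced by @api_method. As a rough illustration of the shape such a pair takes - using plain pydantic and hypothetical field names, not the actual definitions in middlewared.api.current:

    from pydantic import BaseModel

    class AttributeSchema(BaseModel):
        # illustrative fields mirroring the old inline 'attribute_schema' above
        name: str
        title: str
        required: bool

    class SchemaEntry(BaseModel):
        key: str
        attribute_schemas: list[AttributeSchema]

    class DNSAuthenticatorSchemasArgs(BaseModel):
        pass  # the method takes no arguments

    class DNSAuthenticatorSchemasResult(BaseModel):
        result: list[SchemaEntry]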
diff --git a/src/middlewared/middlewared/plugins/alert.py b/src/middlewared/middlewared/plugins/alert.py
index b4d1cd4f5470d..bd5b225944694 100644
--- a/src/middlewared/middlewared/plugins/alert.py
+++ b/src/middlewared/middlewared/plugins/alert.py
@@ -28,7 +28,16 @@
ProThreadedAlertService,
)
from middlewared.alert.base import UnavailableException, AlertService as _AlertService
-from middlewared.schema import accepts, Any, Bool, Datetime, Dict, Int, List, OROperator, Patch, returns, Ref, Str
+from middlewared.api import api_method
+from middlewared.api.current import (
+ AlertDismissArgs, AlertDismissResult, AlertListArgs, AlertListResult, AlertListCategoriesArgs,
+ AlertListCategoriesResult, AlertListPoliciesArgs, AlertListPoliciesResult, AlertRestoreArgs, AlertRestoreResult,
+ AlertOneshotCreateArgs, AlertOneshotCreateResult, AlertOneshotDeleteArgs, AlertOneshotDeleteResult,
+ AlertServiceCreateArgs, AlertServiceCreateResult, AlertServiceUpdateArgs, AlertServiceUpdateResult,
+ AlertServiceDeleteArgs, AlertServiceDeleteResult, AlertServiceTestArgs, AlertServiceTestResult,
+ AlertServiceEntry,
+)
+from middlewared.schema import Bool, Dict, Int, Str
from middlewared.service import (
ConfigService, CRUDService, Service, ValidationErrors,
job, periodic, private,
@@ -298,27 +307,14 @@ async def initialize(self, load=True):
async def terminate(self):
await self.flush_alerts()
- @accepts(roles=['ALERT_LIST_READ'])
- @returns(List('alert_policies', items=[Str('policy', enum=POLICIES)]))
+ @api_method(AlertListPoliciesArgs, AlertListPoliciesResult, roles=['ALERT_LIST_READ'])
async def list_policies(self):
"""
List all alert policies which indicate the frequency of the alerts.
"""
return POLICIES
- @accepts(roles=['ALERT_LIST_READ'])
- @returns(List('categories', items=[Dict(
- 'category',
- Str('id'),
- Str('title'),
- List('classes', items=[Dict(
- 'category_class',
- Str('id'),
- Str('title'),
- Str('level'),
- Bool('proactive_support'),
- )])
- )]))
+ @api_method(AlertListCategoriesArgs, AlertListCategoriesResult, roles=['ALERT_LIST_READ'])
async def list_categories(self):
"""
List all types of alerts which the system can issue.
@@ -351,26 +347,7 @@ async def list_categories(self):
if any(alert_class.category == alert_category for alert_class in classes)
]
- @accepts(roles=['ALERT_LIST_READ'])
- @returns(List('alerts', items=[Dict(
- 'alert',
- Str('uuid'),
- Str('source'),
- Str('klass'),
- Any('args'),
- Str('node'),
- Str('key', max_length=None),
- Datetime('datetime'),
- Datetime('last_occurrence'),
- Bool('dismissed'),
- Any('mail', null=True),
- Str('text', max_length=None),
- Str('id'),
- Str('level'),
- Str('formatted', null=True, max_length=None),
- Bool('one_shot'),
- register=True,
- )]))
+ @api_method(AlertListArgs, AlertListResult, roles=['ALERT_LIST_READ'])
async def list(self):
"""
List all types of alerts including active/dismissed currently in the system.
@@ -423,8 +400,7 @@ def __alert_by_uuid(self, uuid):
except IndexError:
return None
- @accepts(Str("uuid"))
- @returns()
+ @api_method(AlertDismissArgs, AlertDismissResult)
async def dismiss(self, uuid):
"""
Dismiss `id` alert.
@@ -460,8 +436,7 @@ def _delete_on_dismiss(self, alert):
if removed:
self._send_alert_deleted_event(alert)
- @accepts(Str("uuid"))
- @returns()
+ @api_method(AlertRestoreArgs, AlertRestoreResult)
async def restore(self, uuid):
"""
Restore `id` alert which had been dismissed.
@@ -911,8 +886,7 @@ async def flush_alerts(self):
del d["mail"]
await self.middleware.call("datastore.insert", "system.alert", d)
- @private
- @accepts(Str("klass"), Any("args", null=True))
+ @api_method(AlertOneshotCreateArgs, AlertOneshotCreateResult, private=True)
@job(lock="process_alerts", transient=True)
async def oneshot_create(self, job, klass, args):
"""
@@ -948,13 +922,7 @@ async def oneshot_create(self, job, klass, args):
await self.middleware.call("alert.send_alerts")
- @private
- @accepts(
- OROperator(
- Str("klass"),
- List('klass', items=[Str('klassname')], default=None),
- ),
- Any("query", null=True, default=None))
+ @api_method(AlertOneshotDeleteArgs, AlertOneshotDeleteResult, private=True)
@job(lock="process_alerts", transient=True)
async def oneshot_delete(self, job, klass, query):
"""
@@ -1030,30 +998,7 @@ class Config:
datastore_extend = "alertservice._extend"
datastore_order_by = ["name"]
cli_namespace = "system.alert.service"
-
- ENTRY = Patch(
- 'alert_service_create', 'alertservice_entry',
- ('add', Int('id')),
- ('add', Str('type__title')),
- )
-
- @accepts()
- @returns(List('alert_service_types', items=[Dict(
- 'alert_service_type',
- Str('name', required=True),
- Str('title', required=True),
- )]))
- async def list_types(self):
- """
- List all types of supported Alert services which can be configured with the system.
- """
- return [
- {
- "name": name,
- "title": factory.title,
- }
- for name, factory in sorted(ALERT_SERVICES_FACTORIES.items(), key=lambda i: i[1].title.lower())
- ]
+ entry = AlertServiceEntry
@private
async def _extend(self, service):
@@ -1078,21 +1023,18 @@ async def _validate(self, service, schema_name):
if factory is None:
verrors.add(f"{schema_name}.type", "This field has invalid value")
raise verrors
+
+ levels = AlertLevel.__members__
+ if service["level"] not in levels:
+ verrors.add(f"{schema_name}.level", f"Level must be one of {list(levels)}")
+ raise verrors
verrors.add_child(f"{schema_name}.attributes",
validate_schema(list(factory.schema.attrs.values()), service["attributes"]))
verrors.check()
- @accepts(Dict(
- "alert_service_create",
- Str("name", required=True, empty=False),
- Str("type", required=True),
- Dict("attributes", required=True, additional_attrs=True),
- Str("level", required=True, enum=list(AlertLevel.__members__)),
- Bool("enabled", default=True),
- register=True,
- ))
+ @api_method(AlertServiceCreateArgs, AlertServiceCreateResult)
async def do_create(self, data):
"""
Create an Alert Service of specified `type`.
@@ -1129,11 +1071,7 @@ async def do_create(self, data):
return await self.get_instance(data["id"])
- @accepts(Int("id"), Patch(
- "alert_service_create",
- "alert_service_update",
- ("attr", {"update": True}),
- ))
+ @api_method(AlertServiceUpdateArgs, AlertServiceUpdateResult)
async def do_update(self, id_, data):
"""
Update Alert Service of `id`.
@@ -1153,17 +1091,14 @@ async def do_update(self, id_, data):
return await self.get_instance(id_)
- @accepts(Int("id"))
+ @api_method(AlertServiceDeleteArgs, AlertServiceDeleteResult)
async def do_delete(self, id_):
"""
Delete Alert Service of `id`.
"""
return await self.middleware.call("datastore.delete", self._config.datastore, id_)
- @accepts(
- Ref('alert_service_create')
- )
- @returns(Bool('successful_test', description='Is `true` if test is successful'))
+ @api_method(AlertServiceTestArgs, AlertServiceTestResult)
async def test(self, data):
"""
Send a test alert using `type` of Alert Service.
@@ -1234,7 +1169,7 @@ class AlertClassesService(ConfigService):
class Config:
datastore = "system.alertclasses"
cli_namespace = "system.alert.class"
-
+
ENTRY = Dict(
"alertclasses_entry",
Int("id"),
diff --git a/src/middlewared/middlewared/plugins/auth.py b/src/middlewared/middlewared/plugins/auth.py
index 95a41556c883d..073e7c46f239f 100644
--- a/src/middlewared/middlewared/plugins/auth.py
+++ b/src/middlewared/middlewared/plugins/auth.py
@@ -293,24 +293,6 @@ async def terminate_other_sessions(self, app):
if errors:
raise CallError("\n".join(["Unable to terminate all sessions:"] + errors))
- @cli_private
- @accepts(Str('username'), Password('password'))
- @returns(Bool(description='Is `true` if `username` was successfully validated with provided `password`'))
- async def check_user(self, username, password):
- """
- Verify username and password
- """
- return await self.check_password(username, password)
-
- @cli_private
- @accepts(Str('username'), Password('password'))
- @returns(Bool(description='Is `true` if `username` was successfully validated with provided `password`'))
- async def check_password(self, username, password):
- """
- Verify username and password
- """
- return await self.middleware.call('auth.authenticate', username, password) is not None
-
@no_auth_required
@accepts(
Int('ttl', default=600, null=True),
diff --git a/src/middlewared/middlewared/plugins/boot.py b/src/middlewared/middlewared/plugins/boot.py
index 30e1577c92e5b..ecb4781ba504c 100644
--- a/src/middlewared/middlewared/plugins/boot.py
+++ b/src/middlewared/middlewared/plugins/boot.py
@@ -208,14 +208,6 @@ async def set_scrub_interval(self, interval):
)
return interval
- @accepts(roles=['READONLY_ADMIN'])
- @returns(Int('interval'))
- async def get_scrub_interval(self):
- """
- Get Automatic Scrub Interval value in days.
- """
- return (await self.middleware.call('system.advanced.config'))['boot_scrub']
-
@asynccontextmanager
async def __toggle_rootfs_readwrite(self):
mnt = await self.middleware.call('filesystem.mount_info', [['mountpoint', '=', '/']], {'get': True})
diff --git a/src/middlewared/middlewared/plugins/catalog/update.py b/src/middlewared/middlewared/plugins/catalog/update.py
index 94d8cbb142339..499add8c156fb 100644
--- a/src/middlewared/middlewared/plugins/catalog/update.py
+++ b/src/middlewared/middlewared/plugins/catalog/update.py
@@ -5,6 +5,7 @@
from middlewared.plugins.docker.state_utils import catalog_ds_path, CATALOG_DATASET_NAME
from middlewared.schema import accepts, Dict, List, returns, Str
from middlewared.service import ConfigService, private, ValidationErrors
+from middlewared.utils import ProductType
from middlewared.validators import Match
from .utils import OFFICIAL_ENTERPRISE_TRAIN, OFFICIAL_LABEL, TMP_IX_APPS_CATALOGS
@@ -30,18 +31,18 @@ class Config:
role_prefix = 'CATALOG'
ENTRY = Dict(
- 'catalog_create',
- List('preferred_trains'),
- Str('id'),
- Str(
- 'label', required=True, validators=[Match(
- r'^\w+[\w.-]*$',
- explanation='Label must start with an alphanumeric character and can include dots and dashes.'
- )],
- max_length=60,
- ),
- register=True,
- )
+ 'catalog_create',
+ List('preferred_trains'),
+ Str('id'),
+ Str(
+ 'label', required=True, validators=[Match(
+ r'^\w+[\w.-]*$',
+ explanation='Label must start with an alphanumeric character and can include dots and dashes.'
+ )],
+ max_length=60,
+ ),
+ register=True,
+ )
@private
def extend(self, data, context):
@@ -92,7 +93,7 @@ async def common_validation(self, schema, data):
'At least 1 preferred train must be specified.'
)
if (
- await self.middleware.call('system.product_type') == 'SCALE_ENTERPRISE' and
+ await self.middleware.call('system.product_type') == ProductType.SCALE_ENTERPRISE and
OFFICIAL_ENTERPRISE_TRAIN not in data['preferred_trains']
):
verrors.add(
@@ -122,7 +123,7 @@ async def do_update(self, data):
@private
async def update_train_for_enterprise(self):
catalog = await self.middleware.call('catalog.config')
- if await self.middleware.call('system.product_type') == 'SCALE_ENTERPRISE':
+ if await self.middleware.call('system.product_type') == ProductType.SCALE_ENTERPRISE:
preferred_trains = []
# Logic coming from here
# https://github.com/truenas/middleware/blob/e7f2b29b6ff8fadcc9fdd8d7f104cbbf5172fc5a/src/middlewared
diff --git a/src/middlewared/middlewared/plugins/cloud_sync.py b/src/middlewared/middlewared/plugins/cloud_sync.py
index d01f5aff9b9ef..913a7ea6c4261 100644
--- a/src/middlewared/middlewared/plugins/cloud_sync.py
+++ b/src/middlewared/middlewared/plugins/cloud_sync.py
@@ -10,9 +10,10 @@
from middlewared.plugins.cloud.model import CloudTaskModelMixin, cloud_task_schema
from middlewared.plugins.cloud.path import get_remote_path, check_local_path
from middlewared.plugins.cloud.remotes import REMOTES, remote_classes
+from middlewared.rclone.remote.storjix import StorjIxError
from middlewared.schema import accepts, Bool, Cron, Dict, Int, Password, Patch, Str
from middlewared.service import (
- CallError, CRUDService, ValidationErrors, item_method, job, pass_app, private, TaskPathService,
+ CallError, CRUDService, ValidationError, ValidationErrors, item_method, job, pass_app, private, TaskPathService,
)
import middlewared.sqlalchemy as sa
from middlewared.utils import Popen, run
@@ -915,7 +916,10 @@ async def create_bucket(self, credentials_id, name):
if not provider.can_create_bucket:
raise CallError("This provider can't create buckets")
- await provider.create_bucket(credentials, name)
+ try:
+ await provider.create_bucket(credentials, name)
+ except StorjIxError as e:
+ raise ValidationError("cloudsync.create_bucket", e.errmsg, e.errno)
@accepts(Int("credentials_id"), roles=["CLOUD_SYNC_WRITE"])
async def list_buckets(self, credentials_id):
diff --git a/src/middlewared/middlewared/plugins/config.py b/src/middlewared/middlewared/plugins/config.py
index 64b77b5446c1a..9ad8e7df10088 100644
--- a/src/middlewared/middlewared/plugins/config.py
+++ b/src/middlewared/middlewared/plugins/config.py
@@ -9,7 +9,7 @@
import tempfile
from middlewared.schema import accepts, Bool, Dict, returns
-from middlewared.service import CallError, Service, job, private
+from middlewared.service import CallError, Service, job, pass_app, private
from middlewared.plugins.pwenc import PWENC_FILE_SECRET
from middlewared.utils.db import FREENAS_DATABASE
@@ -26,6 +26,8 @@
TRUENAS_ADMIN_KEYS_UPLOADED = '/data/truenas_admin_authorized_keys_uploaded'
ROOT_KEYS_UPLOADED = '/data/root_authorized_keys_uploaded'
DATABASE_NAME = os.path.basename(FREENAS_DATABASE)
+CONFIGURATION_UPLOAD_REBOOT_REASON = 'Configuration upload'
+CONFIGURATION_RESET_REBOOT_REASON = 'Configuration reset'
class ConfigService(Service):
@@ -84,7 +86,8 @@ async def save(self, job, options):
@accepts()
@returns()
@job(pipes=["input"])
- def upload(self, job):
+ @pass_app(rest=True)
+ def upload(self, app, job):
"""
Accepts a configuration file via job pipe.
"""
@@ -105,7 +108,10 @@ def upload(self, job):
is_tar = tarfile.is_tarfile(stf.name)
self.upload_impl(stf.name, is_tar_file=is_tar)
- self.middleware.run_coroutine(self.middleware.call('system.reboot', {'delay': 10}), wait=False)
+ self.middleware.run_coroutine(
+ self.middleware.call('system.reboot', CONFIGURATION_UPLOAD_REBOOT_REASON, {'delay': 10}, app=app),
+ wait=False,
+ )
@private
def upload_impl(self, file_or_tar, is_tar_file=False):
@@ -180,7 +186,7 @@ def upload_impl(self, file_or_tar, is_tar_file=False):
'failover.call_remote', 'core.call_hook', ['config.on_upload', [UPLOADED_DB_PATH]]
)
self.middleware.run_coroutine(
- self.middleware.call('failover.call_remote', 'system.reboot'),
+ self.middleware.call('failover.call_remote', 'system.reboot', [CONFIGURATION_UPLOAD_REBOOT_REASON]),
wait=False,
)
except Exception as e:
@@ -193,7 +199,8 @@ def upload_impl(self, file_or_tar, is_tar_file=False):
@accepts(Dict('options', Bool('reboot', default=True)))
@returns()
@job(lock='config_reset', logs=True)
- def reset(self, job, options):
+ @pass_app(rest=True)
+ def reset(self, app, job, options):
"""
Reset database to configuration defaults.
@@ -224,7 +231,9 @@ def reset(self, job, options):
if options['reboot']:
self.middleware.run_coroutine(
- self.middleware.call('failover.call_remote', 'system.reboot'),
+ self.middleware.call(
+ 'failover.call_remote', 'system.reboot', [CONFIGURATION_RESET_REBOOT_REASON],
+ ),
wait=False,
)
except Exception as e:
@@ -240,7 +249,8 @@ def reset(self, job, options):
if options['reboot']:
job.set_progress(95, 'Will reboot in 10 seconds')
self.middleware.run_coroutine(
- self.middleware.call('system.reboot', {'delay': 10}), wait=False,
+ self.middleware.call('system.reboot', CONFIGURATION_RESET_REBOOT_REASON, {'delay': 10}, app=app),
+ wait=False,
)
@private
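Taken together, the config.py hunks thread two new pieces of information into every reboot triggered by a config upload or reset: a human-readable reason, and (for local reboots) the session's app object obtained via @pass_app. Condensed, the two call shapes used above are:

    # local reboot: reason first, options second, originating app attached
    self.middleware.run_coroutine(
        self.middleware.call('system.reboot', CONFIGURATION_UPLOAD_REBOOT_REASON, {'delay': 10}, app=app),
        wait=False,
    )
    # remote (failover peer) reboot: reason passed in the remote argument list
    self.middleware.run_coroutine(
        self.middleware.call('failover.call_remote', 'system.reboot', [CONFIGURATION_UPLOAD_REBOOT_REASON]),
        wait=False,
    )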
diff --git a/src/middlewared/middlewared/plugins/crypto_/cert_info.py b/src/middlewared/middlewared/plugins/crypto_/cert_info.py
index e3ec074abab75..64ca0aecaa6cb 100644
--- a/src/middlewared/middlewared/plugins/crypto_/cert_info.py
+++ b/src/middlewared/middlewared/plugins/crypto_/cert_info.py
@@ -47,17 +47,6 @@ async def ec_curve_choices(self):
"""
return {k: k for k in EC_CURVES}
- @accepts()
- @returns(Dict(
- 'private_key_type_choices',
- *[Str(k, enum=[k]) for k in ('RSA', 'EC')]
- ))
- async def key_type_choices(self):
- """
- Dictionary of supported key types for certificates.
- """
- return {k: k for k in ['RSA', 'EC']}
-
@accepts()
@returns(Dict(
'extended_key_usage_choices',
diff --git a/src/middlewared/middlewared/plugins/device_/device_info.py b/src/middlewared/middlewared/plugins/device_/device_info.py
index 24819bc37317e..5aed4788bdca2 100644
--- a/src/middlewared/middlewared/plugins/device_/device_info.py
+++ b/src/middlewared/middlewared/plugins/device_/device_info.py
@@ -291,19 +291,3 @@ def get_gpus(self):
for gpu in gpus:
gpu['available_to_host'] = gpu['addr']['pci_slot'] not in to_isolate_gpus
return gpus
-
- @accepts()
- @returns(Dict(
- 'gpu_pci_id_choices',
- additional_attrs=True,
- description='Returns PCI id(s) of GPU(s) located in the system',
- example={'Red Hat, Inc. QXL paravirtual graphic card': '0000:00:02.0'}
- ))
- async def gpu_pci_ids_choices(self):
- """
- Retrieve choices for GPU PCI ids located in the system.
- """
- return {
- gpu['description'] or gpu['vendor'] or gpu['addr']['pci_slot']: gpu['addr']['pci_slot']
- for gpu in await self.middleware.call('device.get_gpus')
- }
diff --git a/src/middlewared/middlewared/plugins/directoryservices_/activedirectory_health_mixin.py b/src/middlewared/middlewared/plugins/directoryservices_/activedirectory_health_mixin.py
index be90a3099f367..c586a8e8f72bc 100644
--- a/src/middlewared/middlewared/plugins/directoryservices_/activedirectory_health_mixin.py
+++ b/src/middlewared/middlewared/plugins/directoryservices_/activedirectory_health_mixin.py
@@ -108,6 +108,11 @@ def _recover_ad(self, error: ADHealthError) -> None:
self._recover_secrets()
case ADHealthCheckFailReason.AD_SECRET_ENTRY_MISSING:
self._recover_secrets()
+ case ADHealthCheckFailReason.AD_NETLOGON_FAILURE:
+ # It's possible that our smb.conf has incorrect
+ # information in it. We'll try to regenerate the config
+ # file and then restart winbindd for good measure
+ self.middleware.call_sync('etc.generate', 'smb')
case ADHealthCheckFailReason.WINBIND_STOPPED:
# pick up winbind restart below
pass
diff --git a/src/middlewared/middlewared/plugins/directoryservices_/kerberos_health_mixin.py b/src/middlewared/middlewared/plugins/directoryservices_/kerberos_health_mixin.py
index 80437327545be..a57cf724ab18e 100644
--- a/src/middlewared/middlewared/plugins/directoryservices_/kerberos_health_mixin.py
+++ b/src/middlewared/middlewared/plugins/directoryservices_/kerberos_health_mixin.py
@@ -86,7 +86,7 @@ def _health_check_krb5(self) -> None:
faulted_reason
)
- if not krb5.gss_get_current_cred(krb5_constants.krb5ccache.SYSTEM.value):
+ if not krb5.gss_get_current_cred(krb5_constants.krb5ccache.SYSTEM.value, raise_error=False):
faulted_reason = (
'Kerberos ticket for domain is expired. Failure to renew '
'kerberos ticket may indicate issues with DNS resolution or '
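The fix here is behavioral: judging by the call site, with raise_error=False the helper returns a falsy value (rather than raising) when no valid credential is in the SYSTEM ccache, so the truthiness test actually selects the 'expired ticket' branch instead of aborting the health check with an exception. A sketch of the intended flow, with mark_faulted as a hypothetical stand-in for the surrounding fault handling:

    cred = krb5.gss_get_current_cred(
        krb5_constants.krb5ccache.SYSTEM.value,
        raise_error=False,  # assumed: return None/falsy instead of raising
    )
    if not cred:
        mark_faulted('Kerberos ticket for domain is expired ...')  # hypothetical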
diff --git a/src/middlewared/middlewared/plugins/disk.py b/src/middlewared/middlewared/plugins/disk.py
index 3419fb3927513..2248a15e6eb6d 100644
--- a/src/middlewared/middlewared/plugins/disk.py
+++ b/src/middlewared/middlewared/plugins/disk.py
@@ -3,6 +3,7 @@
import subprocess
from sqlalchemy.exc import IntegrityError
+from middlewared.utils import ProductType
from middlewared.schema import accepts, Bool, Datetime, Dict, Int, Patch, Str
from middlewared.service import filterable, private, CallError, CRUDService, ValidationError
import middlewared.sqlalchemy as sa
@@ -465,7 +466,7 @@ async def configure_power_management(self):
This runs on boot to properly configure all power management options
(Advanced Power Management and IDLE) for all disks.
"""
- if await self.middleware.call('system.product_type') != 'SCALE_ENTERPRISE':
+ if await self.middleware.call('system.product_type') != ProductType.SCALE_ENTERPRISE:
for disk in await self.middleware.call('disk.query'):
await self.middleware.call('disk.power_management', disk['name'], disk)
diff --git a/src/middlewared/middlewared/plugins/disk_/format.py b/src/middlewared/middlewared/plugins/disk_/format.py
index aa5852329433a..a20f2614ac1c4 100644
--- a/src/middlewared/middlewared/plugins/disk_/format.py
+++ b/src/middlewared/middlewared/plugins/disk_/format.py
@@ -20,6 +20,16 @@ def format(self, disk):
raise CallError(f'Disk: {disk!r} is incorrectly formatted with Data Integrity Feature (DIF).')
dev = parted.getDevice(f'/dev/{disk}')
+ # it's important we remove this device from the global cache
+ # so that libparted probes the disk for the latest up-to-date
+ # information. This becomes _very_ important, for example,
+ # when we overprovision disk devices. If the disk is overprovisioned
+ # to a larger/smaller size, then libparted can end up referencing
+ # the old disk size. So depending on the direction of the resize
+ # operation, the `clobber()` operation can run off the end of the
+ # disk and raise an IO failure. We actually saw this internally
+ # during testing.
+ dev._Device__device.cache_remove()
for i in range(2):
if not dev.clobber():
# clobber() wipes partition label info from disk but during testing
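A minimal sketch of the re-probe pattern the comment describes, assuming pyparted and a hypothetical /dev/sdX; the name-mangled attribute reaches the underlying _ped.Device exactly as in the hunk above:

    import parted

    dev = parted.getDevice('/dev/sdX')   # hypothetical disk node
    # evict the global cache entry so libparted re-reads the geometry of a
    # disk whose size may have changed (e.g. after overprovisioning)
    dev._Device__device.cache_remove()
    if not dev.clobber():                # wipe partition-table signatures
        raise RuntimeError('failed to clobber /dev/sdX')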
diff --git a/src/middlewared/middlewared/plugins/disk_/smartctl.py b/src/middlewared/middlewared/plugins/disk_/smartctl.py
index c3857a8f0ad50..de51e403feb01 100644
--- a/src/middlewared/middlewared/plugins/disk_/smartctl.py
+++ b/src/middlewared/middlewared/plugins/disk_/smartctl.py
@@ -21,7 +21,7 @@ async def update():
devices = await self.middleware.call("device.get_disks")
hardware = await self.middleware.call("truenas.is_ix_hardware")
- context = SMARTCTX(devices=devices, enterprise_hardware=hardware)
+ context = SMARTCTX(devices=devices, enterprise_hardware=hardware, middleware=self.middleware)
self.smartctl_args_for_disk = dict(zip(
[disk["name"] for disk in disks],
await asyncio_map(
@@ -57,7 +57,7 @@ async def smartctl(self, disk, args, options):
else:
devices = await self.middleware.call('device.get_disks')
hardware = await self.middleware.call('truenas.is_ix_hardware')
- context = SMARTCTX(devices=devices, enterprise_hardware=hardware)
+ context = SMARTCTX(devices=devices, enterprise_hardware=hardware, middleware=self.middleware)
if disks := await self.middleware.call('disk.query', [['name', '=', disk]]):
smartoptions = disks[0]['smartoptions']
else:
diff --git a/src/middlewared/middlewared/plugins/disk_/temperature.py b/src/middlewared/middlewared/plugins/disk_/temperature.py
index 2b04cc6e87c95..5901efb1c0d5b 100644
--- a/src/middlewared/middlewared/plugins/disk_/temperature.py
+++ b/src/middlewared/middlewared/plugins/disk_/temperature.py
@@ -5,8 +5,10 @@
import async_timeout
+from middlewared.api import api_method
+from middlewared.api.current import DiskTemperatureAlertsArgs, DiskTemperatureAlertsResult
from middlewared.common.smart.smartctl import SMARTCTL_POWERMODES
-from middlewared.schema import accepts, Bool, Dict, Int, List, Ref, returns, Str
+from middlewared.schema import accepts, Bool, Dict, Int, List, returns, Str
from middlewared.service import private, Service
from middlewared.utils.asyncio_ import asyncio_map
from middlewared.utils.disks import parse_smartctl_for_temperature_output
@@ -139,8 +141,7 @@ def temperature_agg(self, names, days):
return final
- @accepts(List('names', items=[Str('name')]), roles=['REPORTING_READ'])
- @returns(Ref('alert'))
+ @api_method(DiskTemperatureAlertsArgs, DiskTemperatureAlertsResult, roles=['REPORTING_READ'])
async def temperature_alerts(self, names):
"""
Returns existing temperature alerts for specified disk `names`.
diff --git a/src/middlewared/middlewared/plugins/docker/fs_manage.py b/src/middlewared/middlewared/plugins/docker/fs_manage.py
index 9629294e7423e..46dfc82ce94a9 100644
--- a/src/middlewared/middlewared/plugins/docker/fs_manage.py
+++ b/src/middlewared/middlewared/plugins/docker/fs_manage.py
@@ -18,7 +18,7 @@ async def common_func(self, mount):
await self.middleware.call('zfs.dataset.mount', docker_ds, {'recursive': True, 'force_mount': True})
else:
await self.middleware.call('zfs.dataset.umount', docker_ds, {'force': True})
- await self.middleware.call('catalog.sync')
+ return await self.middleware.call('catalog.sync')
except Exception as e:
await self.middleware.call(
'docker.state.set_status', Status.FAILED.value,
@@ -27,10 +27,10 @@ async def common_func(self, mount):
raise
async def mount(self):
- await self.common_func(True)
+ return await self.common_func(True)
async def umount(self):
- await self.common_func(False)
+ return await self.common_func(False)
async def ix_apps_is_mounted(self, dataset_to_check=None):
"""
diff --git a/src/middlewared/middlewared/plugins/docker/state_setup.py b/src/middlewared/middlewared/plugins/docker/state_setup.py
index e1250614de7f6..cf5253c62b24e 100644
--- a/src/middlewared/middlewared/plugins/docker/state_setup.py
+++ b/src/middlewared/middlewared/plugins/docker/state_setup.py
@@ -75,7 +75,9 @@ async def status_change(self):
await self.create_update_docker_datasets(config['dataset'])
# Docker dataset would not be mounted at this point, so we will explicitly mount them now
- await self.middleware.call('docker.fs_manage.mount')
+ catalog_sync_job = await self.middleware.call('docker.fs_manage.mount')
+ if catalog_sync_job:
+ await catalog_sync_job.wait()
await self.middleware.call('docker.state.start_service')
self.middleware.create_task(self.middleware.call('docker.state.periodic_check'))
diff --git a/src/middlewared/middlewared/plugins/docker/update.py b/src/middlewared/middlewared/plugins/docker/update.py
index f16e494be0585..1058ba864947d 100644
--- a/src/middlewared/middlewared/plugins/docker/update.py
+++ b/src/middlewared/middlewared/plugins/docker/update.py
@@ -19,10 +19,7 @@ class DockerModel(sa.Model):
pool = sa.Column(sa.String(255), default=None, nullable=True)
enable_image_updates = sa.Column(sa.Boolean(), default=True)
nvidia = sa.Column(sa.Boolean(), default=False)
- address_pools = sa.Column(sa.JSON(list), default=[
- {'base': '172.30.0.0/16', 'size': 27},
- {'base': '172.31.0.0/16', 'size': 27}
- ])
+ address_pools = sa.Column(sa.JSON(list), default=[{'base': '172.17.0.0/12', 'size': 24}])
class DockerService(ConfigService):
@@ -96,8 +93,9 @@ async def do_update(self, job, data):
except Exception as e:
raise CallError(f'Failed to stop docker service: {e}')
+ catalog_sync_job = None
try:
- await self.middleware.call('docker.fs_manage.umount')
+ catalog_sync_job = await self.middleware.call('docker.fs_manage.umount')
except CallError as e:
# We handle this specially, if for whatever reason ix-apps dataset is not there,
# we don't make it fatal to change pools etc - however if some dataset other than
@@ -105,6 +103,9 @@ async def do_update(self, job, data):
# and needs to be fixed before we can proceed
if e.errno != errno.ENOENT or await self.middleware.call('docker.fs_manage.ix_apps_is_mounted'):
raise
+ finally:
+ if catalog_sync_job:
+ await catalog_sync_job.wait()
await self.middleware.call('docker.state.set_status', Status.UNCONFIGURED.value)
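For a sense of scale of the new default above: the old setting carved two /16 bases into /27 networks, while the new one hands Docker a single /12 base split into /24s. A quick worked check with the stdlib (strict=False because 172.17.0.0 has host bits set under a /12 mask, so it normalizes to 172.16.0.0/12):

    import ipaddress

    old = [ipaddress.ip_network('172.30.0.0/16'), ipaddress.ip_network('172.31.0.0/16')]
    new = ipaddress.ip_network('172.17.0.0/12', strict=False)  # -> 172.16.0.0/12

    old_count = sum(2 ** (27 - n.prefixlen) for n in old)  # 4096 /27 networks, 30 usable hosts each
    new_count = 2 ** (24 - new.prefixlen)                  # 4096 /24 networks, 254 usable hosts each
    print(old_count, new_count)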
diff --git a/src/middlewared/middlewared/plugins/enclosure.py b/src/middlewared/middlewared/plugins/enclosure.py
index 6572df84916f7..fa5d78ab0a6a1 100644
--- a/src/middlewared/middlewared/plugins/enclosure.py
+++ b/src/middlewared/middlewared/plugins/enclosure.py
@@ -314,29 +314,6 @@ async def delayed():
if disk_enclosure != disk['enclosure']:
self.middleware.call_sync('disk.update', id_, {'enclosure': disk_enclosure})
- @private
- @accepts(Str("pool", null=True, default=None))
- def sync_zpool(self, pool):
- """
- Sync enclosure of a given ZFS pool
- """
- for i in self.middleware.call_sync('enclosure.list_ses_enclosures'):
- enc = EnclosureDevice(i)
- for slot, info in filter(lambda x: x[0] != 0 and x[1]['type'] == 23, enc.status()['elements'].items()):
- # slot 0 is the group identifer disk slots
- # type 23 is "Array Device Slot" (i.e. disks)
- slot_str = f'{slot - 1}'
- try:
- if enc.set_control(slot_str, 'get=ident'):
- # identify light is on, clear it
- enc.set_control(slot_str, 'clear=ident')
- if enc.set_control(slot_str, 'get=fault'):
- # fault light is on, clear it
- enc.set_control(slot_str, 'clear=fault')
- except OSError:
- self.logger.warning('Failed to clear slot %r on enclosure %r', slot, i, exc_info=True)
- return
-
def __get_enclosures(self):
return Enclosures(
self.middleware.call_sync("enclosure.get_ses_enclosures"),
@@ -1255,46 +1232,3 @@ def value(self):
if not output:
output.append("None")
return ', '.join(output)
-
-
-async def devd_zfs_hook(middleware, data):
- if data.get('type') in (
- 'ATTACH',
- 'DETACH',
- 'resource.fs.zfs.removed',
- 'misc.fs.zfs.config_sync',
- 'misc.fs.zfs.vdev_remove',
- ):
- await middleware.call('enclosure.sync_zpool')
-
-
-async def zfs_events_hook(middleware, data):
- event_id = data['class']
-
- if event_id in [
- 'sysevent.fs.zfs.config_sync',
- 'sysevent.fs.zfs.vdev_remove',
- ]:
- await middleware.call('enclosure.sync_zpool')
-
-
-async def udev_block_devices_hook(middleware, data):
- if data.get('SUBSYSTEM') != 'block':
- return
- elif data.get('DEVTYPE') != 'disk':
- return
- elif data['SYS_NAME'].startswith(DISKS_TO_IGNORE):
- return
-
- if data['ACTION'] in ['add', 'remove']:
- await middleware.call('enclosure.sync_zpool')
-
-
-async def pool_post_delete(middleware, id_):
- await middleware.call('enclosure.sync_zpool')
-
-
-def setup(middleware):
- middleware.register_hook('zfs.pool.events', zfs_events_hook)
- middleware.register_hook('udev.block', udev_block_devices_hook)
- middleware.register_hook('pool.post_delete', pool_post_delete)
diff --git a/src/middlewared/middlewared/plugins/enclosure_/enums.py b/src/middlewared/middlewared/plugins/enclosure_/enums.py
index 384ff2bb57a3d..4d8f5eef0cfe7 100644
--- a/src/middlewared/middlewared/plugins/enclosure_/enums.py
+++ b/src/middlewared/middlewared/plugins/enclosure_/enums.py
@@ -22,6 +22,7 @@ class ControllerModels(Enum):
F130 = 'F130' # all nvme flash
H10 = 'H10'
H20 = 'H20'
+ H30 = 'H30'
M30 = 'M30'
M40 = 'M40'
M50 = 'M50'
diff --git a/src/middlewared/middlewared/plugins/enclosure_/slot_mappings.py b/src/middlewared/middlewared/plugins/enclosure_/slot_mappings.py
index f30910bf8e392..03ec8665cbf24 100644
--- a/src/middlewared/middlewared/plugins/enclosure_/slot_mappings.py
+++ b/src/middlewared/middlewared/plugins/enclosure_/slot_mappings.py
@@ -446,21 +446,22 @@ def get_slot_info(enc):
'DEFAULT': {
'model': {
enc.model: {
- 3: {SYSFS_SLOT_KEY: 2, MAPPED_SLOT_KEY: 1, SUPPORTS_IDENTIFY_KEY: False},
- 6: {SYSFS_SLOT_KEY: 5, MAPPED_SLOT_KEY: 2, SUPPORTS_IDENTIFY_KEY: False},
- 9: {SYSFS_SLOT_KEY: 8, MAPPED_SLOT_KEY: 3, SUPPORTS_IDENTIFY_KEY: False},
- 12: {SYSFS_SLOT_KEY: 11, MAPPED_SLOT_KEY: 4, SUPPORTS_IDENTIFY_KEY: False},
- 2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 5, SUPPORTS_IDENTIFY_KEY: False},
- 5: {SYSFS_SLOT_KEY: 4, MAPPED_SLOT_KEY: 6, SUPPORTS_IDENTIFY_KEY: False},
- 8: {SYSFS_SLOT_KEY: 7, MAPPED_SLOT_KEY: 7, SUPPORTS_IDENTIFY_KEY: False},
- 11: {SYSFS_SLOT_KEY: 10, MAPPED_SLOT_KEY: 8, SUPPORTS_IDENTIFY_KEY: False},
- 1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 9, SUPPORTS_IDENTIFY_KEY: False},
- 4: {SYSFS_SLOT_KEY: 3, MAPPED_SLOT_KEY: 10, SUPPORTS_IDENTIFY_KEY: False},
- 7: {SYSFS_SLOT_KEY: 6, MAPPED_SLOT_KEY: 11, SUPPORTS_IDENTIFY_KEY: False},
- 10: {SYSFS_SLOT_KEY: 9, MAPPED_SLOT_KEY: 12, SUPPORTS_IDENTIFY_KEY: False}
+ 1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 1, SUPPORTS_IDENTIFY_KEY: True},
+ 2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 2, SUPPORTS_IDENTIFY_KEY: True},
+ 3: {SYSFS_SLOT_KEY: 2, MAPPED_SLOT_KEY: 3, SUPPORTS_IDENTIFY_KEY: True},
+ 4: {SYSFS_SLOT_KEY: 3, MAPPED_SLOT_KEY: 4, SUPPORTS_IDENTIFY_KEY: True},
+ 5: {SYSFS_SLOT_KEY: 4, MAPPED_SLOT_KEY: 5, SUPPORTS_IDENTIFY_KEY: True},
+ 6: {SYSFS_SLOT_KEY: 5, MAPPED_SLOT_KEY: 6, SUPPORTS_IDENTIFY_KEY: True},
+ 7: {SYSFS_SLOT_KEY: 6, MAPPED_SLOT_KEY: 7, SUPPORTS_IDENTIFY_KEY: True},
+ 8: {SYSFS_SLOT_KEY: 7, MAPPED_SLOT_KEY: 8, SUPPORTS_IDENTIFY_KEY: True},
+ 9: {SYSFS_SLOT_KEY: 8, MAPPED_SLOT_KEY: 9, SUPPORTS_IDENTIFY_KEY: True},
+ 10: {SYSFS_SLOT_KEY: 9, MAPPED_SLOT_KEY: 10, SUPPORTS_IDENTIFY_KEY: True},
+ 11: {SYSFS_SLOT_KEY: 10, MAPPED_SLOT_KEY: 11, SUPPORTS_IDENTIFY_KEY: True},
+ 12: {SYSFS_SLOT_KEY: 11, MAPPED_SLOT_KEY: 12, SUPPORTS_IDENTIFY_KEY: True}
}
},
'id': {
+ # on the rear of the system (these slots do not support identify lights)
'3000000000000001': {
1: {SYSFS_SLOT_KEY: 0, MAPPED_SLOT_KEY: 13, SUPPORTS_IDENTIFY_KEY: False},
2: {SYSFS_SLOT_KEY: 1, MAPPED_SLOT_KEY: 14, SUPPORTS_IDENTIFY_KEY: False}
diff --git a/src/middlewared/middlewared/plugins/failover.py b/src/middlewared/middlewared/plugins/failover.py
index e876e43f963c9..ad312c20efafb 100644
--- a/src/middlewared/middlewared/plugins/failover.py
+++ b/src/middlewared/middlewared/plugins/failover.py
@@ -23,6 +23,7 @@
from middlewared.plugins.failover_.configure import HA_LICENSE_CACHE_KEY
from middlewared.plugins.failover_.remote import NETWORK_ERRORS
from middlewared.plugins.system.reboot import RebootReason
+from middlewared.plugins.update import SYSTEM_UPGRADE_REBOOT_REASON
from middlewared.plugins.update_.install import STARTING_INSTALLER
from middlewared.plugins.update_.utils import DOWNLOAD_UPDATE_FILE
from middlewared.plugins.update_.utils_linux import mount_update
@@ -388,7 +389,7 @@ def sync_to_peer(self, options):
)
if options['reboot']:
- self.middleware.call_sync('failover.call_remote', 'system.reboot', [{'delay': 2}])
+ self.middleware.call_sync('failover.call_remote', 'system.reboot', ['Failover sync to peer', {'delay': 2}])
@accepts(roles=['FAILOVER_WRITE'])
@returns()
@@ -858,9 +859,7 @@ def callback(j, controller):
rjob.result()
self.middleware.call_sync(
- 'failover.call_remote', 'system.reboot',
- [{'delay': 5}],
- {'job': True}
+ 'failover.call_remote', 'system.reboot', [SYSTEM_UPGRADE_REBOOT_REASON, {'delay': 5}], {'job': True},
)
except Exception:
raise
@@ -1160,7 +1159,6 @@ async def hook_setup_ha(middleware, *args, **kwargs):
async def hook_pool_export(middleware, pool=None, *args, **kwargs):
- await middleware.call('enclosure.sync_zpool', pool)
await middleware.call('failover.remove_encryption_keys', {'pools': [pool]})
diff --git a/src/middlewared/middlewared/plugins/failover_/event.py b/src/middlewared/middlewared/plugins/failover_/event.py
index 776d8f782bf69..e790d78550487 100644
--- a/src/middlewared/middlewared/plugins/failover_/event.py
+++ b/src/middlewared/middlewared/plugins/failover_/event.py
@@ -122,16 +122,6 @@ async def restart_services(self, data):
svc, data['timeout']
)
- async def background(self):
- """
- Some methods can be backgrounded on a failover
- event since they can take quite some time to
- finish. So background them to not hold up the
- entire failover event.
- """
- logger.info('Syncing enclosure')
- self.middleware.create_task(self.middleware.call('enclosure.sync_zpool'))
-
async def refresh_failover_status(self, jobid, event):
# this is called in a background task so we need to make sure that
# we wait on the current failover job to complete before we try
@@ -696,13 +686,6 @@ def vrrp_master(self, job, fobj, ifname, event):
self.run_call('disk.sync_all', {'zfs_guid': True})
logger.info('Done syncing disks')
- # background any methods that can take awhile to
- # run but shouldn't hold up the entire failover
- # event
- logger.info('Starting failover background jobs')
- self.run_call('failover.events.background')
- logger.info('Done starting failover background jobs')
-
if handle_alua:
try:
if iscsi_suspended:
diff --git a/src/middlewared/middlewared/plugins/failover_/fenced.py b/src/middlewared/middlewared/plugins/failover_/fenced.py
index ffc5d704949ab..0c697e2dd8906 100644
--- a/src/middlewared/middlewared/plugins/failover_/fenced.py
+++ b/src/middlewared/middlewared/plugins/failover_/fenced.py
@@ -9,6 +9,7 @@
import os
import signal
+from middlewared.plugins.system.product import ProductType
from middlewared.service import Service, CallError
from middlewared.utils import MIDDLEWARE_RUN_DIR
from middlewared.utils.cgroups import move_to_root_cgroups
@@ -115,7 +116,7 @@ def signal(self, options):
async def hook_pool_event(middleware, *args, **kwargs):
# only run this on SCALE Enterprise
- if await middleware.call('system.product_type') != 'SCALE_ENTERPRISE':
+ if await middleware.call('system.product_type') != ProductType.SCALE_ENTERPRISE:
return
# HA licensed systems call fenced on their own
diff --git a/src/middlewared/middlewared/plugins/filesystem_/acl.py b/src/middlewared/middlewared/plugins/filesystem_/acl.py
index 97748f9dd0da2..0721748dd8f00 100644
--- a/src/middlewared/middlewared/plugins/filesystem_/acl.py
+++ b/src/middlewared/middlewared/plugins/filesystem_/acl.py
@@ -293,37 +293,6 @@ def setperm(self, job, data):
self.__acltool(data['path'], action, uid, gid, options)
job.set_progress(100, 'Finished setting permissions.')
- @accepts(Str('path', required=False, default=''))
- @returns(List('acl_choices', items=[Str("choice")]))
- async def default_acl_choices(self, path):
- """
- `DEPRECATED`
- Returns list of names of ACL templates. Wrapper around
- filesystem.acltemplate.query.
- """
- acl_templates = await self.middleware.call('filesystem.acltemplate.by_path', {"path": path})
- return [x['name'] for x in acl_templates]
-
- @accepts(
- Str('acl_type', default='POSIX_OPEN'),
- Str('share_type', default='NONE', enum=['NONE', 'SMB', 'NFS']),
- )
- @returns(OROperator(Ref('nfs4_acl'), Ref('posix1e_acl'), name='acl'))
- async def get_default_acl(self, acl_type, share_type):
- """
- `DEPRECATED`
- Returns a default ACL depending on the usage specified by `acl_type`.
- If an admin group is defined, then an entry granting it full control will
- be placed at the top of the ACL. Optionally may pass `share_type` to argument
- to get share-specific template ACL.
- """
- filters = [("name", "=", acl_type)]
- options = {"ensure_builtins": share_type == "SMB"}
- return (await self.middleware.call("filesystem.acltemplate.by_path", {
- "query-filters": filters,
- "format-options": options
- }))[0]['acl']
-
@private
def getacl_nfs4(self, path, simplified, resolve_ids):
flags = "-jn"
diff --git a/src/middlewared/middlewared/plugins/idmap.py b/src/middlewared/middlewared/plugins/idmap.py
index 1f1a7953bea3a..eef071b26a145 100644
--- a/src/middlewared/middlewared/plugins/idmap.py
+++ b/src/middlewared/middlewared/plugins/idmap.py
@@ -456,16 +456,6 @@ async def backend_options(self):
"""
return {x.name: x.value for x in IdmapBackend}
- @accepts(
- Str('idmap_backend', enum=[x.name for x in IdmapBackend]),
- roles=['DIRECTORY_SERVICE_READ']
- )
- async def options_choices(self, backend):
- """
- Returns a list of supported keys for the specified idmap backend.
- """
- return IdmapBackend[backend].supported_keys()
-
@accepts(roles=['DIRECTORY_SERVICE_READ'])
async def backend_choices(self):
"""
diff --git a/src/middlewared/middlewared/plugins/iscsi_/host_target.py b/src/middlewared/middlewared/plugins/iscsi_/host_target.py
index c333ba433467d..5f94ce8fd48a0 100644
--- a/src/middlewared/middlewared/plugins/iscsi_/host_target.py
+++ b/src/middlewared/middlewared/plugins/iscsi_/host_target.py
@@ -1,6 +1,6 @@
from collections import defaultdict
-from middlewared.schema import accepts, Int, List
+from middlewared.schema import accepts, Int
from middlewared.service import private, Service, ServiceChangeMixin
import middlewared.sqlalchemy as sa
@@ -30,26 +30,6 @@ async def get_targets(self, id_):
], {"relationships": False})
]]])
- @accepts(Int("id"),
- List("ids", items=[Int("id")]),
- audit='Set iSCSI host targets',
- audit_callback=True,
- roles=['SHARING_ISCSI_HOST_WRITE'])
- async def set_targets(self, audit_callback, id_, ids):
- """
- Associates targets `ids` with host `id`.
- """
- audit_callback(await self._audit_summary(id_, ids))
- await self.middleware.call("datastore.delete", "services.iscsihosttarget", [["host_id", "=", id_]])
-
- for target_id in ids:
- await self.middleware.call("datastore.insert", "services.iscsihosttarget", {
- "host_id": id_,
- "target_id": target_id,
- })
-
- await self._service_change("iscsitarget", "reload")
-
async def _audit_summary(self, id_, ids):
"""
Return a summary string of the data provided, to be used in the audit summary.
diff --git a/src/middlewared/middlewared/plugins/kerberos.py b/src/middlewared/middlewared/plugins/kerberos.py
index fcbd170857ebf..e433c31110c48 100644
--- a/src/middlewared/middlewared/plugins/kerberos.py
+++ b/src/middlewared/middlewared/plugins/kerberos.py
@@ -2,14 +2,12 @@
import base64
import errno
import gssapi
-import io
import os
-import shutil
import subprocess
import tempfile
import time
-from middlewared.schema import accepts, returns, Dict, Int, List, Patch, Str, OROperator, Password, Ref, Datetime, Bool
+from middlewared.schema import accepts, Dict, Int, List, Patch, Str, OROperator, Password, Ref, Bool
from middlewared.service import CallError, ConfigService, CRUDService, job, periodic, private, ValidationErrors
import middlewared.sqlalchemy as sa
from middlewared.utils import run
@@ -840,23 +838,6 @@ async def do_delete(self, audit_callback, id_):
'Failed to start kerberos service after deleting keytab entry: %s' % e
)
- @accepts(Dict(
- 'keytab_data',
- Str('name', required=True),
- ), audit='Kerberos keytab upload:', audit_extended=lambda name: name)
- @returns(Ref('kerberos_keytab_entry'))
- @job(lock='upload_keytab', pipes=['input'], check_pipes=True)
- async def upload_keytab(self, job, data):
- """
- Upload a keytab file. This method expects the keytab file to be uploaded using
- the /_upload/ endpoint.
- """
- ktmem = io.BytesIO()
- await self.middleware.run_in_thread(shutil.copyfileobj, job.pipes.input.r, ktmem)
- b64kt = base64.b64encode(ktmem.getvalue())
- return await self.middleware.call('kerberos.keytab.create',
- {'name': data['name'], 'file': b64kt.decode()})
-
@private
async def _cleanup_kerberos_principals(self):
principal_choices = await self.middleware.call('kerberos.keytab.kerberos_principal_choices')
@@ -910,27 +891,6 @@ async def ktutil_list(self, keytab_file=KRB_Keytab['SYSTEM'].value):
return []
- @accepts(roles=['DIRECTORY_SERVICE_READ'])
- @returns(List(
- 'system-keytab',
- items=[
- Dict(
- 'keytab-entry',
- Int('slot'),
- Int('kvno'),
- Str('principal'),
- Str('etype'),
- Bool('etype_deprecated'),
- Datetime('date')
- )
- ]
- ))
- async def system_keytab_list(self):
- """
- Returns content of system keytab (/etc/krb5.keytab).
- """
- return await self.ktutil_list()
-
@private
async def kerberos_principal_choices(self):
"""
diff --git a/src/middlewared/middlewared/plugins/kmip/update.py b/src/middlewared/middlewared/plugins/kmip/update.py
index fa6bf96866385..0ca2ada7e730e 100644
--- a/src/middlewared/middlewared/plugins/kmip/update.py
+++ b/src/middlewared/middlewared/plugins/kmip/update.py
@@ -53,14 +53,6 @@ async def kmip_extend(self, data):
data[k] = data[k]['id']
return data
- @accepts(roles=['KMIP_READ'])
- @returns(Dict(*[Str(i, enum=[i]) for i in SUPPORTED_SSL_VERSIONS]))
- async def ssl_version_choices(self):
- """
- Retrieve valid SSL version choices to be used when configuring kmip service.
- """
- return {k: k for k in SUPPORTED_SSL_VERSIONS}
-
@accepts(
Patch(
'kmip_entry', 'kmip_update',
diff --git a/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate.py b/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate.py
index 8dd9f84ce365d..15b3061c21ad5 100644
--- a/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate.py
+++ b/src/middlewared/middlewared/plugins/kubernetes_to_docker/migrate.py
@@ -211,12 +211,11 @@ def migrate(self, job, kubernetes_pool, options):
if bulk_job.error:
raise CallError(f'Failed to redeploy apps: {bulk_job.error}')
- for index, status in enumerate(bulk_job.result):
- if status['error']:
- release_details[index].update({
- 'error': f'Failed to deploy app: {status["error"]}',
- 'successfully_migrated': False,
- })
+ # We won't check here if the apps are working or not, as the idea of this endpoint is to migrate
+ # apps from k8s to docker, and that migration is complete at this point. If an app is not running,
+ # that does not mean the migration didn't work - it's an app problem and we need to fix/investigate
+ # it accordingly. The user will see that the app is not working in the UI and can raise a ticket
+ # or consult the app lifecycle logs.
job.set_progress(100, 'Migration completed')
diff --git a/src/middlewared/middlewared/plugins/mail.py b/src/middlewared/middlewared/plugins/mail.py
index 0c7a01f60c507..3409df41c3958 100644
--- a/src/middlewared/middlewared/plugins/mail.py
+++ b/src/middlewared/middlewared/plugins/mail.py
@@ -1,8 +1,7 @@
from middlewared.schema import accepts, Bool, Dict, Int, List, Password, Patch, Ref, returns, Str
from middlewared.service import CallError, ConfigService, ValidationErrors, job, periodic, private
import middlewared.sqlalchemy as sa
-from middlewared.plugins.system.product import PRODUCT_NAME
-from middlewared.utils import BRAND
+from middlewared.utils import ProductName, BRAND
from middlewared.utils.mako import get_template
from middlewared.validators import Email
@@ -247,7 +246,7 @@ def send(self, job, message, config):
"""
gc = self.middleware.call_sync('datastore.config', 'network.globalconfiguration')
hostname = f'{gc["gc_hostname"]}.{gc["gc_domain"]}'
- message['subject'] = f'{PRODUCT_NAME} {hostname}: {message["subject"]}'
+ message['subject'] = f'{ProductName.PRODUCT_NAME} {hostname}: {message["subject"]}'
add_html = True
if 'html' in message and message['html'] is None:
message.pop('html')
diff --git a/src/middlewared/middlewared/plugins/pool_/dataset.py b/src/middlewared/middlewared/plugins/pool_/dataset.py
index e9b2340484e66..bbbae99dbf3ff 100644
--- a/src/middlewared/middlewared/plugins/pool_/dataset.py
+++ b/src/middlewared/middlewared/plugins/pool_/dataset.py
@@ -990,49 +990,6 @@ async def do_delete(self, audit_callback, id_, options):
})
return result
- @accepts(
- Str('name'),
- Dict(
- 'snapshots',
- Bool('all', default=True),
- Bool('recursive', default=False),
- List(
- 'snapshots', items=[Dict(
- 'snapshot_spec',
- Str('start'),
- Str('end'),
- ), Str('snapshot_name')]
- ),
- ),
- )
- @returns(List('deleted_snapshots', items=[Str('deleted_snapshot')]))
- @job(lock=lambda args: f'destroy_snapshots_{args[0]}')
- async def destroy_snapshots(self, job, name, snapshots_spec):
- """
- Destroy specified snapshots of a given dataset.
- """
- await self.get_instance(name, {'extra': {
- 'properties': [],
- 'retrieve_children': False,
- }})
-
- verrors = ValidationErrors()
- schema_name = 'destroy_snapshots'
- if snapshots_spec['all'] and snapshots_spec['snapshots']:
- verrors.add(
- f'{schema_name}.snapshots', 'Must not be specified when all snapshots are specified for removal'
- )
- else:
- for i, entry in enumerate(snapshots_spec['snapshots']):
- if not entry:
- verrors.add(f'{schema_name}.snapshots.{i}', 'Either "start" or "end" must be specified')
-
- verrors.check()
-
- job.set_progress(20, 'Initial validation complete')
-
- return await self.middleware.call('zfs.dataset.destroy_snapshots', name, snapshots_spec)
-
def __handle_zfs_set_property_error(self, e, properties_definitions):
zfs_name_to_api_name = {i[1]: i[0] for i in properties_definitions}
api_name = zfs_name_to_api_name.get(e.property) or e.property
diff --git a/src/middlewared/middlewared/plugins/pool_/dataset_mountpoint.py b/src/middlewared/middlewared/plugins/pool_/dataset_mountpoint.py
deleted file mode 100644
index f0d3c30389c5f..0000000000000
--- a/src/middlewared/middlewared/plugins/pool_/dataset_mountpoint.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from middlewared.schema import accepts, returns, Bool, Str
-from middlewared.service import CallError, Service
-
-
-class PoolDatasetService(Service):
-
- class Config:
- namespace = "pool.dataset"
-
- @accepts(Str("dataset"), Bool("raise", default=True), roles=['DATASET_READ'])
- @returns(Str(null=True))
- async def mountpoint(self, dataset, raise_):
- """
- Returns mountpoint for specific mounted dataset. If it is not mounted and `raise` is `true` (default), an
- error is raised. `null` is returned otherwise.
- """
- if mount_info := await self.middleware.call("filesystem.mount_info", [["mount_source", "=", dataset]]):
- return mount_info[0]["mountpoint"]
-
- if raise_:
- raise CallError(f"Dataset {dataset!r} is not mounted")
diff --git a/src/middlewared/middlewared/plugins/rdma/constants.py b/src/middlewared/middlewared/plugins/rdma/constants.py
new file mode 100644
index 0000000000000..d71095bdca973
--- /dev/null
+++ b/src/middlewared/middlewared/plugins/rdma/constants.py
@@ -0,0 +1,9 @@
+import enum
+
+
+class RDMAprotocols(enum.Enum):
+ NFS = 'NFS'
+ ISER = 'iSER'
+
+ def values():
+ return [a.value for a in RDMAprotocols]
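One subtlety in this new module: values is declared without self, cls, or a decorator, yet the call works, because plain functions defined in an Enum body are not converted into enum members and are looked up as ordinary attributes on the class. Usage sketch:

    from middlewared.plugins.rdma.constants import RDMAprotocols

    RDMAprotocols.values()     # -> ['NFS', 'iSER']; zero-argument function call
    RDMAprotocols.NFS.value    # -> 'NFS'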
diff --git a/src/middlewared/middlewared/plugins/rdma/rdma.py b/src/middlewared/middlewared/plugins/rdma/rdma.py
index 41f643af54695..dc8db7e2d5aa6 100644
--- a/src/middlewared/middlewared/plugins/rdma/rdma.py
+++ b/src/middlewared/middlewared/plugins/rdma/rdma.py
@@ -1,6 +1,7 @@
import json
import subprocess
from pathlib import Path
+from .constants import RDMAprotocols
from middlewared.schema import Bool, Dict, List, Ref, Str, accepts, returns
from middlewared.service import Service, private
@@ -133,3 +134,12 @@ def get_card_choices(self):
names = [link['rdma'] for link in v['links']]
v['name'] = ':'.join(sorted(names))
return list(grouper.values())
+
+ @private
+ async def capable_services(self):
+ result = []
+ is_ent = await self.middleware.call('system.is_enterprise')
+ if is_ent and 'MINI' not in await self.middleware.call('truenas.get_chassis_hardware'):
+ if await self.middleware.call('rdma.get_link_choices', True):
+ result.append(RDMAprotocols.NFS.value)
+ return result
diff --git a/src/middlewared/middlewared/plugins/reporting/netdata/graphs.py b/src/middlewared/middlewared/plugins/reporting/netdata/graphs.py
index 3a2563f332e90..34a4e3ddf1a7a 100644
--- a/src/middlewared/middlewared/plugins/reporting/netdata/graphs.py
+++ b/src/middlewared/middlewared/plugins/reporting/netdata/graphs.py
@@ -253,6 +253,7 @@ def get_chart_name(self, identifier: typing.Optional[str] = None) -> str:
class UPSBase(GraphBase):
UPS_IDENTIFIER = None
+ skip_zero_values_in_aggregation = True
async def export_multiple_identifiers(
self, query_params: dict, identifiers: list, aggregate: bool = True
@@ -260,6 +261,11 @@ async def export_multiple_identifiers(
self.UPS_IDENTIFIER = (await self.middleware.call('ups.config'))['identifier']
return await super().export_multiple_identifiers(query_params, identifiers, aggregate)
+ def query_parameters(self) -> dict:
+ return super().query_parameters() | {
+ 'group': 'median'
+ }
+
class UPSChargePlugin(UPSBase):
@@ -333,7 +339,6 @@ class UPSTemperaturePlugin(UPSBase):
title = 'UPS Temperature'
vertical_label = 'Celsius'
- skip_zero_values_in_aggregation = True
uses_identifiers = False
def get_chart_name(self, identifier: typing.Optional[str]) -> str:
diff --git a/src/middlewared/middlewared/plugins/reporting/netdata_configure.py b/src/middlewared/middlewared/plugins/reporting/netdata_configure.py
index 70c20b7d18b1f..0f4ee8b90e633 100644
--- a/src/middlewared/middlewared/plugins/reporting/netdata_configure.py
+++ b/src/middlewared/middlewared/plugins/reporting/netdata_configure.py
@@ -2,6 +2,7 @@
import subprocess
from middlewared.service import private, Service
+from middlewared.utils.filesystem.copy import copytree, CopyTreeConfig
from .utils import get_netdata_state_path
@@ -24,17 +25,20 @@ def netdata_state_location(self):
@private
def post_dataset_mount_action(self):
- if os.path.exists(get_netdata_state_path()):
+ netdata_state_path = get_netdata_state_path()
+ # We want to make sure this path always exists, regardless of any error,
+ # so that netdata can at least start itself gracefully
+ try:
+ os.makedirs(netdata_state_path, exist_ok=False)
+ except FileExistsError:
return
- cp = subprocess.run(
- ['cp', '-a', '/var/lib/netdata', get_netdata_state_path()], check=False, capture_output=True,
- )
- if cp.returncode != 0:
- self.logger.error('Failed to copy netdata state over from /var/lib/netdata: %r', cp.stderr.decode())
- # We want to make sure this path exists always regardless of an error so that
- # at least netdata can start itself gracefully
- os.makedirs(get_netdata_state_path(), exist_ok=True)
+ try:
+ copytree('/var/lib/netdata', netdata_state_path, config=CopyTreeConfig())
+ except Exception:
+ self.logger.error('Failed to copy netdata state over from /var/lib/netdata', exc_info=True)
+ os.chown(netdata_state_path, uid=999, gid=997)
+ os.chmod(netdata_state_path, mode=0o755)
@private
async def start_service(self):
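The reworked flow above inverts the old order: create the state directory first and return early if it already exists, then best-effort copy the shipped state, then pin ownership and permissions so netdata can always start. The same flow sketched with only the stdlib, using shutil.copytree as a stand-in for the internal copytree/CopyTreeConfig helper:

    import os
    import shutil

    NETDATA_UID, NETDATA_GID = 999, 997  # values used in the hunk above

    def bootstrap_state_dir(state_path: str) -> None:
        try:
            os.makedirs(state_path, exist_ok=False)
        except FileExistsError:
            return  # already bootstrapped on a previous boot
        try:
            shutil.copytree('/var/lib/netdata', state_path, dirs_exist_ok=True)
        except Exception:
            pass  # non-fatal: netdata can still start with an empty state dir
        os.chown(state_path, NETDATA_UID, NETDATA_GID)
        os.chmod(state_path, 0o755)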
diff --git a/src/middlewared/middlewared/plugins/service_/services/all.py b/src/middlewared/middlewared/plugins/service_/services/all.py
index 1061f46fa7e7d..0286d75621854 100644
--- a/src/middlewared/middlewared/plugins/service_/services/all.py
+++ b/src/middlewared/middlewared/plugins/service_/services/all.py
@@ -36,7 +36,6 @@
RoutingService,
SslService,
SyslogdService,
- SystemService,
TimeservicesService,
UserService,
)
@@ -78,7 +77,6 @@
RoutingService,
SslService,
SyslogdService,
- SystemService,
TimeservicesService,
TruecommandService,
UserService,
diff --git a/src/middlewared/middlewared/plugins/service_/services/docker.py b/src/middlewared/middlewared/plugins/service_/services/docker.py
index c64fb84bb599a..1c14d3a0d225f 100644
--- a/src/middlewared/middlewared/plugins/service_/services/docker.py
+++ b/src/middlewared/middlewared/plugins/service_/services/docker.py
@@ -22,7 +22,7 @@ async def before_start(self):
async def start(self):
try:
await super().start()
- timeout = 40
+ timeout = 120 # 120 because HDDs are notoriously slow and docker can take more time to start on them
# First time when docker is started, it takes a bit more time to initialise itself properly
# and we need to have sleep here so that after start is called post_start is not dismissed
while timeout > 0:
diff --git a/src/middlewared/middlewared/plugins/service_/services/pseudo/misc.py b/src/middlewared/middlewared/plugins/service_/services/pseudo/misc.py
index f0a470cec662f..c96fd458fdbb5 100644
--- a/src/middlewared/middlewared/plugins/service_/services/pseudo/misc.py
+++ b/src/middlewared/middlewared/plugins/service_/services/pseudo/misc.py
@@ -172,18 +172,6 @@ class SyslogdService(SimpleService):
systemd_unit = "syslog-ng"
-class SystemService(PseudoServiceBase):
- name = "system"
-
- restartable = True
-
- async def stop(self):
- self.middleware.create_task(self.middleware.call("system.shutdown", {"delay": 3}))
-
- async def restart(self):
- self.middleware.create_task(self.middleware.call("system.reboot", {"delay": 3}))
-
-
class TimeservicesService(PseudoServiceBase):
name = "timeservices"
diff --git a/src/middlewared/middlewared/plugins/smart.py b/src/middlewared/middlewared/plugins/smart.py
index 33d56e2f165ca..2e584131939b3 100644
--- a/src/middlewared/middlewared/plugins/smart.py
+++ b/src/middlewared/middlewared/plugins/smart.py
@@ -4,6 +4,7 @@
import re
import time
import json
+from typing import Any
from humanize import ordinal
@@ -40,7 +41,7 @@ async def annotate_disk_smart_tests(middleware, tests_filter, disk):
return dict(tests=filter_list(tests, tests_filter), current_test=current_test, **disk)
-def parse_smart_selftest_results(data) -> list[AtaSelfTest] | list[NvmeSelfTest] | list[ScsiSelfTest] | None:
+def parse_smart_selftest_results(data) -> list[dict[str, Any]] | None:
tests = []
# ataprint.cpp
@@ -71,7 +72,7 @@ def parse_smart_selftest_results(data) -> list[AtaSelfTest] | list[NvmeSelfTest]
else:
test.status = "FAILED"
- tests.append(test)
+ tests.append(test.dict())
return tests
@@ -103,7 +104,7 @@ def parse_smart_selftest_results(data) -> list[AtaSelfTest] | list[NvmeSelfTest]
else:
test.status = "FAILED"
- tests.append(test)
+ tests.append(test.dict())
return tests
@@ -112,7 +113,7 @@ def parse_smart_selftest_results(data) -> list[AtaSelfTest] | list[NvmeSelfTest]
if "scsi_self_test_0" in data: # 0 is most recent test
for index in range(0, 20): # only 20 tests can ever return
test_key = f"scsi_self_test_{index}"
- if not test_key in data:
+ if test_key not in data:
break
entry = data[test_key]
@@ -145,7 +146,7 @@ def parse_smart_selftest_results(data) -> list[AtaSelfTest] | list[NvmeSelfTest]
else:
test.status = "FAILED"
- tests.append(test)
+ tests.append(test.dict())
return tests
@@ -314,7 +315,7 @@ async def query_for_disk(self, disk_name):
"""
Query S.M.A.R.T. tests for the specified disk name.
"""
- disk = await self.middleware.call_sync('disk.query', [['name', '=', disk_name]], {'get': True})
+ disk = await self.middleware.call('disk.query', [['name', '=', disk_name]], {'get': True})
return [
test
diff --git a/src/middlewared/middlewared/plugins/snapshot.py b/src/middlewared/middlewared/plugins/snapshot.py
index 20f417cd51858..26807f5f3a1df 100644
--- a/src/middlewared/middlewared/plugins/snapshot.py
+++ b/src/middlewared/middlewared/plugins/snapshot.py
@@ -318,64 +318,6 @@ async def do_delete(self, audit_callback, id_, options):
return response
- @accepts(
- Dict(
- 'periodic_snapshot_foreseen_count',
- Int('lifetime_value', required=True),
- Str('lifetime_unit', enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR'], required=True),
- Cron(
- 'schedule',
- defaults={
- 'minute': '00',
- 'begin': '00:00',
- 'end': '23:59',
- },
- required=True,
- begin_end=True
- ),
- register=True,
- )
- )
- @returns(Int())
- def foreseen_count(self, data):
- """
- Returns a number of snapshots (per-dataset) being retained if a periodic snapshot task with specific parameters
- is created.
- """
-
- # Arbitrary year choice, fixed for unit tests repeatability. We don't need the precise answer, we only need
- # to evaluate the magnitude.
- base = datetime(2020, 1, 1, 0, 0, 0) - timedelta(seconds=1)
-
- multiplier = 1
- lifetime_value = data['lifetime_value']
- lifetime_unit = data['lifetime_unit']
- if lifetime_unit == 'YEAR' and lifetime_value > 1:
- # All years are the same, we don't need to run the same croniter multiple times for N years, just need to
- # run it for one year and multiply the result.
- multiplier = lifetime_value
- lifetime_value = 1
-
- until = base + timedelta(seconds=lifetime_value * {
- 'HOUR': 3600,
- 'DAY': 3600 * 24,
- 'WEEK': 3600 * 24 * 7,
- 'MONTH': 3600 * 24 * 30,
- 'YEAR': 3600 * 24 * 365,
- }[lifetime_unit])
-
- iter_ = croniter_for_schedule(data['schedule'], base, datetime)
- count = 0
- while True:
- d = iter_.get_next()
- if d > until:
- break
-
- if data['schedule']['begin'] <= d.time() <= data['schedule']['end']:
- count += 1
-
- return count * multiplier
-
@accepts()
@returns(Int())
def max_count(self):
diff --git a/src/middlewared/middlewared/plugins/support.py b/src/middlewared/middlewared/plugins/support.py
index e27e92ab173ac..15a60dd1c2094 100644
--- a/src/middlewared/middlewared/plugins/support.py
+++ b/src/middlewared/middlewared/plugins/support.py
@@ -146,32 +146,6 @@ async def fields(self):
['secondary_phone', 'Secondary Contact Phone'],
]
- @accepts(Password('token', default=''), roles=['SUPPORT_READ'])
- @returns(Dict(additional_attrs=True, example={'API': '11008', 'WebUI': '10004'}))
- async def fetch_categories(self, token):
- """
- Fetch issue categories using access token `token`.
- Returns a dict with the category name as a key and id as value.
- """
-
- await self.middleware.call('network.general.will_perform_activity', 'support')
-
- if not await self.middleware.call('system.is_enterprise') and not token:
- raise CallError('token is required')
-
- sw_name = 'freenas' if not await self.middleware.call('system.is_enterprise') else 'truenas'
- data = await post(
- f'https://{ADDRESS}/{sw_name}/api/v1.0/categories',
- data=json.dumps({
- 'token': token,
- }),
- )
-
- if 'error' in data:
- raise CallError(data['message'], errno.EINVAL)
-
- return data
-
@accepts(Str('query'), roles=['SUPPORT_READ'])
@returns(List('similar_issues', items=[Dict(
'similar_issue',
diff --git a/src/middlewared/middlewared/plugins/sysdataset.py b/src/middlewared/middlewared/plugins/sysdataset.py
index 4c74ed7b6b15c..d36f06766593e 100644
--- a/src/middlewared/middlewared/plugins/sysdataset.py
+++ b/src/middlewared/middlewared/plugins/sysdataset.py
@@ -286,7 +286,9 @@ async def do_update(self, job, data):
if await self.middleware.call('failover.licensed'):
if await self.middleware.call('failover.status') == 'MASTER':
try:
- await self.middleware.call('failover.call_remote', 'system.reboot')
+ await self.middleware.call(
+ 'failover.call_remote', 'system.reboot', ['Failover system dataset change'],
+ )
except Exception as e:
self.logger.debug('Failed to reboot standby storage controller after system dataset change: %s', e)
@@ -577,7 +579,8 @@ def __mount(self, pool, uuid, path=SYSDATASET_PATH):
os.chmod(mountpoint, mode_perms)
mounted = True
- self.__post_mount_actions(ds_config['name'], ds_config.get('post_mount_actions', []))
+ if path == SYSDATASET_PATH:
+ self.__post_mount_actions(ds_config['name'], ds_config.get('post_mount_actions', []))
if mounted and path == SYSDATASET_PATH:
fsid = os.statvfs(SYSDATASET_PATH).f_fsid
diff --git a/src/middlewared/middlewared/plugins/system/lifecycle.py b/src/middlewared/middlewared/plugins/system/lifecycle.py
index 7f056989570b0..5029daae443aa 100644
--- a/src/middlewared/middlewared/plugins/system/lifecycle.py
+++ b/src/middlewared/middlewared/plugins/system/lifecycle.py
@@ -1,8 +1,10 @@
import asyncio
-from middlewared.schema import accepts, Bool, Dict, Int, returns, Str
+from middlewared.api import api_method
+from middlewared.api.current import SystemRebootArgs, SystemRebootResult, SystemShutdownArgs, SystemShutdownResult
+from middlewared.schema import accepts, Bool, returns, Str
from middlewared.service import job, private, Service, no_auth_required, pass_app
-from middlewared.utils import Popen, run
+from middlewared.utils import run
from .utils import lifecycle_conf, RE_KDUMP_CONFIGURED
@@ -19,7 +21,7 @@ async def first_boot(self):
@pass_app()
async def boot_id(self, app):
"""
- Returns an unique boot identifier.
+ Returns a unique boot identifier.
It is supposed to be unique for every system boot.
"""
@@ -53,41 +55,39 @@ async def state(self):
return 'READY'
return 'BOOTING'
- @accepts(Dict('system-reboot', Int('delay', required=False), required=False))
- @returns()
+ @api_method(SystemRebootArgs, SystemRebootResult)
@job()
- async def reboot(self, job, options):
+ @pass_app(rest=True)
+ async def reboot(self, app, job, reason, options):
"""
Reboots the operating system.
Emits an "added" event of name "system" and id "reboot".
"""
- if options is None:
- options = {}
+ await self.middleware.log_audit_message(app, 'REBOOT', {'reason': reason}, True)
- self.middleware.send_event('system.reboot', 'ADDED')
+ self.middleware.send_event('system.reboot', 'ADDED', fields={'reason': reason})
- delay = options.get('delay')
- if delay:
- await asyncio.sleep(delay)
+ if options['delay'] is not None:
+ await asyncio.sleep(options['delay'])
await run(['/sbin/shutdown', '-r', 'now'])
- @accepts(Dict('system-shutdown', Int('delay', required=False), required=False))
- @returns()
+ @api_method(SystemShutdownArgs, SystemShutdownResult)
@job()
- async def shutdown(self, job, options):
+ @pass_app(rest=True)
+ async def shutdown(self, app, job, reason, options):
"""
Shuts down the operating system.
An "added" event of name "system" and id "shutdown" is emitted when shutdown is initiated.
"""
- if options is None:
- options = {}
+ await self.middleware.log_audit_message(app, 'SHUTDOWN', {'reason': reason}, True)
- delay = options.get('delay')
- if delay:
- await asyncio.sleep(delay)
+ self.middleware.send_event('system.shutdown', 'ADDED', fields={'reason': reason})
+
+ if options['delay'] is not None:
+ await asyncio.sleep(options['delay'])
await run(['/sbin/poweroff'])
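
With the move to `api_method(SystemRebootArgs, ...)`, `system.reboot` and `system.shutdown` now take a positional `reason` followed by an options dict, and the reason is both audited and emitted with the event. A sketch of a middleware-side caller under the new signature, mirroring the update.py hunk below (the exact options schema lives in SystemRebootArgs and is assumed here to accept a nullable `delay`):

    async def reboot_for_upgrade(middleware):
        # `reason` is required and lands in the audit trail and the
        # 'system.reboot' ADDED event; `delay` is assumed nullable.
        await middleware.call('system.reboot', 'System upgrade', {'delay': 10})
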
diff --git a/src/middlewared/middlewared/plugins/system/product.py b/src/middlewared/middlewared/plugins/system/product.py
index 9ba96732026a6..4d306b4d98821 100644
--- a/src/middlewared/middlewared/plugins/system/product.py
+++ b/src/middlewared/middlewared/plugins/system/product.py
@@ -11,7 +11,7 @@
from middlewared.plugins.truenas import EULA_PENDING_PATH
from middlewared.schema import accepts, Bool, returns, Str
from middlewared.service import no_authz_required, private, Service, ValidationError
-from middlewared.utils import sw_info
+from middlewared.utils import ProductType, sw_info
from middlewared.utils.license import LICENSE_ADDHW_MAPPING
@@ -36,19 +36,19 @@ async def product_type(self):
if SystemService.PRODUCT_TYPE is None:
if await self.is_ha_capable():
# HA capable hardware
- SystemService.PRODUCT_TYPE = 'SCALE_ENTERPRISE'
+ SystemService.PRODUCT_TYPE = ProductType.SCALE_ENTERPRISE
else:
if license_ := await self.middleware.call('system.license'):
if license_['model'].lower().startswith('freenas'):
# legacy freenas certified
- SystemService.PRODUCT_TYPE = 'SCALE'
+ SystemService.PRODUCT_TYPE = ProductType.SCALE
else:
# the license has been issued for a "certified" line
# of hardware which is considered enterprise
- SystemService.PRODUCT_TYPE = 'SCALE_ENTERPRISE'
+ SystemService.PRODUCT_TYPE = ProductType.SCALE_ENTERPRISE
else:
# no license
- SystemService.PRODUCT_TYPE = 'SCALE'
+ SystemService.PRODUCT_TYPE = ProductType.SCALE
return SystemService.PRODUCT_TYPE
@@ -58,7 +58,7 @@ async def is_ha_capable(self):
@private
async def is_enterprise(self):
- return await self.middleware.call('system.product_type') == 'SCALE_ENTERPRISE'
+ return await self.middleware.call('system.product_type') == ProductType.SCALE_ENTERPRISE
@no_authz_required
@accepts()
@@ -198,11 +198,6 @@ async def feature_enabled(self, name):
"""
Returns whether the `feature` is enabled or not
"""
- is_core = (await self.middleware.call('system.product_type')) == 'CORE'
- if name == 'FIBRECHANNEL' and is_core:
- return False
- elif is_core:
- return True
license_ = await self.middleware.call('system.license')
if license_ and name in license_['features']:
return True
diff --git a/src/middlewared/middlewared/plugins/truenas.py b/src/middlewared/middlewared/plugins/truenas.py
index 50add5e02478f..9eb6dd8e184b7 100644
--- a/src/middlewared/middlewared/plugins/truenas.py
+++ b/src/middlewared/middlewared/plugins/truenas.py
@@ -1,43 +1,15 @@
import errno
-import json
import os
-from datetime import timedelta
from ixhardware import TRUENAS_UNKNOWN, get_chassis_hardware
from middlewared.plugins.truecommand.enums import Status as TrueCommandStatus
-from middlewared.schema import accepts, Bool, Dict, Patch, returns, Str
+from middlewared.schema import accepts, Bool, Patch, returns, Str
from middlewared.service import cli_private, job, no_auth_required, private, Service
from middlewared.utils.functools_ import cache
-from middlewared.utils.time_utils import utc_now
-import middlewared.sqlalchemy as sa
EULA_FILE = '/usr/local/share/truenas/eula.html'
EULA_PENDING_PATH = "/data/truenas-eula-pending"
-user_attrs = [
- Str('first_name'),
- Str('last_name'),
- Str('title'),
- Str('office_phone'),
- Str('mobile_phone'),
- Str('primary_email'),
- Str('secondary_email'),
- Str('address'),
- Str('city'),
- Str('state'),
- Str('zip'),
- Str('country'),
-]
-
-
-class TruenasCustomerInformationModel(sa.Model):
- __tablename__ = 'truenas_customerinformation'
-
- id = sa.Column(sa.Integer(), primary_key=True)
- data = sa.Column(sa.Text())
- updated_at = sa.Column(sa.DateTime())
- sent_at = sa.Column(sa.DateTime(), nullable=True)
- form_dismissed = sa.Column(sa.Boolean())
class TrueNASService(Service):
@@ -116,76 +88,6 @@ def unaccept_eula(self):
with open(EULA_PENDING_PATH, "w") as f:
os.fchmod(f.fileno(), 0o600)
- # TODO: Document this please
- @accepts()
- async def get_customer_information(self):
- """
- Returns stored customer information.
- """
- result = await self.__fetch_customer_information()
- return result
-
- @accepts(Dict(
- 'customer_information_update',
- Str('company'),
- Dict('administrative_user', *user_attrs),
- Dict('technical_user', *user_attrs),
- Dict(
- 'reseller',
- Str('company'),
- Str('first_name'),
- Str('last_name'),
- Str('title'),
- Str('office_phone'),
- Str('mobile_phone'),
- ),
- Dict(
- 'physical_location',
- Str('address'),
- Str('city'),
- Str('state'),
- Str('zip'),
- Str('country'),
- Str('contact_name'),
- Str('contact_phone_number'),
- Str('contact_email'),
- ),
- Str('primary_use_case'),
- Str('other_primary_use_case'),
- ))
- async def update_customer_information(self, data):
- """
- Updates customer information.
- """
- customer_information = await self.__fetch_customer_information()
-
- await self.middleware.call('datastore.update', 'truenas.customerinformation', customer_information["id"], {
- "data": json.dumps(data),
- "updated_at": utc_now(),
- })
-
- return customer_information
-
- async def __fetch_customer_information(self):
- result = await self.middleware.call('datastore.config', 'truenas.customerinformation')
- result["immutable_data"] = await self.__fetch_customer_information_immutable_data()
- result["data"] = json.loads(result["data"])
- result["needs_update"] = utc_now() - result["updated_at"] > timedelta(days=365)
- return result
-
- async def __fetch_customer_information_immutable_data(self):
- license_ = await self.middleware.call('system.license')
- if license_ is None:
- return None
-
- return {
- "serial_number": license_['system_serial'],
- "serial_number_ha": license_['system_serial_ha'],
- "support_level": license_['contract_type'].title(),
- "support_start_date": license_['contract_start'].isoformat(),
- "support_end_date": license_['contract_end'].isoformat(),
- }
-
@accepts(roles=['READONLY_ADMIN'])
@returns(Bool('is_production_system'))
async def is_production(self):
diff --git a/src/middlewared/middlewared/plugins/update.py b/src/middlewared/middlewared/plugins/update.py
index 5dd46df2a0cc8..6a2a22117ae0a 100644
--- a/src/middlewared/middlewared/plugins/update.py
+++ b/src/middlewared/middlewared/plugins/update.py
@@ -1,5 +1,5 @@
from middlewared.schema import accepts, Bool, Dict, Str
-from middlewared.service import job, private, CallError, Service
+from middlewared.service import job, private, CallError, Service, pass_app
import middlewared.sqlalchemy as sa
from middlewared.plugins.update_.utils import UPLOAD_LOCATION
from middlewared.utils import PRODUCT
@@ -13,6 +13,8 @@
import textwrap
import pathlib
+SYSTEM_UPGRADE_REBOOT_REASON = 'System upgrade'
+
def parse_train_name(name):
split = (name + '-').split('-')
@@ -256,7 +258,8 @@ async def get_pending(self, path):
Bool('reboot', default=False),
))
@job(lock='update')
- async def update(self, job, attrs):
+ @pass_app(rest=True)
+ async def update(self, app, job, attrs):
"""
Downloads (if not already in cache) and applies an update.
@@ -288,7 +291,7 @@ async def update(self, job, attrs):
await self.middleware.call_hook('update.post_update')
if attrs['reboot']:
- await self.middleware.call('system.reboot', {'delay': 10})
+ await self.middleware.call('system.reboot', SYSTEM_UPGRADE_REBOOT_REASON, {'delay': 10}, app=app)
return True
diff --git a/src/middlewared/middlewared/plugins/update_/freebsd_to_scale_linux.py b/src/middlewared/middlewared/plugins/update_/freebsd_to_scale_linux.py
deleted file mode 100644
index 20cf08d09f2fc..0000000000000
--- a/src/middlewared/middlewared/plugins/update_/freebsd_to_scale_linux.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import logging
-from pathlib import Path
-
-from middlewared.service import job, private, Service
-from middlewared.utils import run
-
-logger = logging.getLogger(__name__)
-
-
-class UpdateService(Service):
-
- @private
- def remove_files(self):
- for i in ("/data/freebsd-to-scale-update", "/var/lib/dbus/machine-id", "/etc/machine-id"):
- try:
- Path(i).unlink(missing_ok=True)
- except Exception:
- logger.error('Failed removing %r', i, exc_info=True)
-
- @private
- @job()
- async def freebsd_to_scale(self, job):
- logger.info("Updating CORE installation to SCALE")
-
- await self.middleware.run_in_thread(self.remove_files)
- await run(["systemd-machine-id-setup"], check=False)
- await self.middleware.call("etc.generate", "fstab", "initial")
- await run(["mount", "-a"])
-
- config = await self.middleware.call("system.advanced.config")
- if config["serialconsole"]:
- cp = await run(["systemctl", "enable", f"serial-getty@{config['serialport']}.service"], check=False)
- if cp.returncode:
- self.logger.error(
- "Failed to enable %r serial port service: %r", config["serialport"], cp.stderr.decode()
- )
-
- await self.middleware.call("etc.generate", "rc")
- await self.middleware.call("boot.update_initramfs")
- await self.middleware.call("etc.generate", "grub")
-
- await self.middleware.call("system.reboot")
diff --git a/src/middlewared/middlewared/plugins/vm/profiles.py b/src/middlewared/middlewared/plugins/vm/profiles.py
deleted file mode 100644
index 3f34efb30a355..0000000000000
--- a/src/middlewared/middlewared/plugins/vm/profiles.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from middlewared.schema import accepts, Dict, returns
-from middlewared.service import Service
-
-
-class VMService(Service):
-
- class Config:
- cli_namespace = 'service.vm'
-
- @accepts()
- @returns(Dict(additional_attrs=True))
- async def profiles(self):
- """
- Returns a dictionary of defaults for different VM guest types.
- """
- return {
- 'WINDOWS': {
- 'trusted_platform_module': True,
- 'memory': 8192,
- 'cores': 2,
- 'bootloader_ovmf': 'OVMF_CODE_4M.fd',
- },
- 'LINUX': {
- 'memory': 8192,
- 'cores': 2,
- },
- 'OTHERS': {
- 'memory': 8192,
- 'cores': 2,
- },
- }
diff --git a/src/middlewared/middlewared/plugins/vmware.py b/src/middlewared/middlewared/plugins/vmware.py
index 6559dfe03c152..9744285122bb0 100644
--- a/src/middlewared/middlewared/plugins/vmware.py
+++ b/src/middlewared/middlewared/plugins/vmware.py
@@ -356,32 +356,6 @@ def __get_datastores(self, data):
return datastores
- @accepts(Int('pk'), roles=['READONLY_ADMIN'])
- async def get_virtual_machines(self, pk):
- """
- Returns Virtual Machines on the VMWare host identified by `pk`.
- """
- await self.middleware.call('network.general.will_perform_activity', 'vmware')
-
- item = await self.query([('id', '=', pk)], {'get': True})
-
- server_instance = self.connect(item)
-
- content = server_instance.RetrieveContent()
- objview = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)
- vm_view = objview.view
- objview.Destroy()
-
- vms = {}
- for vm in vm_view:
- data = {
- 'uuid': vm.config.uuid,
- 'name': vm.name,
- 'power_state': vm.summary.runtime.powerState,
- }
- vms[vm.config.uuid] = data
- return vms
-
@accepts(Str('dataset'), Bool('recursive'), roles=['READONLY_ADMIN'])
def dataset_has_vms(self, dataset, recursive):
"""
diff --git a/src/middlewared/middlewared/plugins/webui/crypto.py b/src/middlewared/middlewared/plugins/webui/crypto.py
index a0d4f5d7742ec..3d963c85ca42b 100644
--- a/src/middlewared/middlewared/plugins/webui/crypto.py
+++ b/src/middlewared/middlewared/plugins/webui/crypto.py
@@ -20,3 +20,7 @@ async def certificateauthority_profiles(self):
@accepts(Int('cert_id'), roles=['READONLY_ADMIN'])
async def get_certificate_domain_names(self, cert_id):
return await self.middleware.call('certificate.get_domain_names', cert_id)
+
+ @accepts(roles=['READONLY_ADMIN'])
+ async def csr_profiles(self):
+ return await self.middleware.call('certificate.certificate_signing_requests_profiles')
diff --git a/src/middlewared/middlewared/plugins/zfs_/snapshot.py b/src/middlewared/middlewared/plugins/zfs_/snapshot.py
index 0ae1b1a696a34..4031e8646f613 100644
--- a/src/middlewared/middlewared/plugins/zfs_/snapshot.py
+++ b/src/middlewared/middlewared/plugins/zfs_/snapshot.py
@@ -250,30 +250,6 @@ def do_update(self, snap_id, data):
else:
return self.middleware.call_sync('zfs.snapshot.get_instance', snap_id)
- @accepts(
- Dict(
- 'snapshot_remove',
- Str('dataset', required=True),
- Str('name', required=True),
- Bool('defer_delete')
- ),
- roles=['SNAPSHOT_DELETE'],
- )
- def remove(self, data):
- """
- Remove a snapshot from a given dataset.
-
- Returns:
- bool: True if succeed otherwise False.
- """
- self.logger.debug('zfs.snapshot.remove is deprecated, use zfs.snapshot.delete')
- snapshot_name = data['dataset'] + '@' + data['name']
- try:
- self.do_delete(snapshot_name, {'defer': data.get('defer_delete') or False})
- except Exception:
- return False
- return True
-
@accepts(
Str('id'),
Dict(
diff --git a/src/middlewared/middlewared/pytest/unit/common/smart/test_smartctl.py b/src/middlewared/middlewared/pytest/unit/common/smart/test_smartctl.py
index 9f553d230f9ab..88836a6954e18 100644
--- a/src/middlewared/middlewared/pytest/unit/common/smart/test_smartctl.py
+++ b/src/middlewared/middlewared/pytest/unit/common/smart/test_smartctl.py
@@ -6,13 +6,13 @@
@pytest.mark.asyncio
async def test__get_smartctl_args__disk_nonexistent():
- context = SMARTCTX(devices={}, enterprise_hardware=False)
+ context = SMARTCTX(devices={}, enterprise_hardware=False, middleware=None)
assert await get_smartctl_args(context, "sda", "") is None
@pytest.mark.asyncio
async def test__get_smartctl_args__nvme():
- context = SMARTCTX(devices={}, enterprise_hardware=False)
+ context = SMARTCTX(devices={}, enterprise_hardware=False, middleware=None)
assert await get_smartctl_args(context, "nvme0n1", "") == ["/dev/nvme0n1", "-d", "nvme"]
@@ -46,6 +46,7 @@ async def test_get_disk__unknown_usb_bridge():
},
},
enterprise_hardware=False,
+ middleware=None,
)
assert await get_smartctl_args(context, "sda", "") == ["/dev/sda", "-d", "sat"]
@@ -80,6 +81,7 @@ async def test_get_disk__generic():
},
},
enterprise_hardware=False,
+ middleware=None,
)
with patch("middlewared.common.smart.smartctl.run") as run:
run.return_value = Mock(stdout="Everything is OK")
@@ -117,6 +119,7 @@ async def test_get_disk__nvme_behind_sd():
},
},
enterprise_hardware=False,
+ middleware=None,
)
with patch("middlewared.common.smart.smartctl.run") as run:
run.return_value = Mock(stdout="Everything is OK")
diff --git a/src/middlewared/middlewared/pytest/unit/middleware.py b/src/middlewared/middlewared/pytest/unit/middleware.py
index eeb6794927d6d..88822c4b06cf9 100644
--- a/src/middlewared/middlewared/pytest/unit/middleware.py
+++ b/src/middlewared/middlewared/pytest/unit/middleware.py
@@ -3,12 +3,9 @@
from unittest.mock import AsyncMock, Mock
from middlewared.plugins.datastore.read import DatastoreService
-from middlewared.settings import conf
from middlewared.utils import filter_list
from middlewared.utils.plugins import SchemasMixin
-conf.debug_mode = False # FIXME: Find a proper way to disable return value schema validation in tests
-
class Middleware(SchemasMixin, dict):
diff --git a/src/middlewared/middlewared/pytest/unit/plugins/test_pci_ids_for_gpu_isolation.py b/src/middlewared/middlewared/pytest/unit/plugins/test_pci_ids_for_gpu_isolation.py
deleted file mode 100644
index cbe6320c49b8c..0000000000000
--- a/src/middlewared/middlewared/pytest/unit/plugins/test_pci_ids_for_gpu_isolation.py
+++ /dev/null
@@ -1,130 +0,0 @@
-import pytest
-
-from unittest.mock import Mock, patch
-
-from middlewared.pytest.unit.helpers import load_compound_service
-from middlewared.pytest.unit.middleware import Middleware
-
-
-SYSTEM_ADVANCED_SERVICE = load_compound_service('system.advanced')
-AVAILABLE_GPU = [
- {
- 'addr': {
- 'pci_slot': '0000:16:0e.0',
- 'domain': '0000',
- 'bus': '16',
- 'slot': '0e'
- },
- 'description': 'Red Hat, Inc. Virtio 1.0 GPU',
- 'devices': [
- {
- 'pci_id': '8086:29C0',
- 'pci_slot': '0000:16:0e.0',
- 'vm_pci_slot': 'pci_0000_16_0e_0'
- },
- {
- 'pci_id': '1AF4:1050',
- 'pci_slot': '0000:16:0e.2',
- 'vm_pci_slot': 'pci_0000_16_0e_2'
- },
- ],
- 'vendor': None,
- 'uses_system_critical_devices': False,
- 'available_to_host': True
- },
- {
- 'addr': {
- 'pci_slot': '0000:17:0e.0',
- 'domain': '0000',
- 'bus': '17',
- 'slot': '0e'
- },
- 'description': 'Red Hat, Inc. Virtio 1.0 GPU',
- 'devices': [
- {
- 'pci_id': '8086:29C0',
- 'pci_slot': '0000:17:0e.0',
- 'vm_pci_slot': 'pci_0000_17_0e_0'
- },
- {
- 'pci_id': '1AF4:1050',
- 'pci_slot': '0000:17:0e.2',
- 'vm_pci_slot': 'pci_0000_17_0e_2'
- },
- ],
- 'vendor': None,
- 'uses_system_critical_devices': False,
- 'available_to_host': False
- },
- {
- 'addr': {
- 'pci_slot': '0000:18:0e.0',
- 'domain': '0000',
- 'bus': '18',
- 'slot': '0e'
- },
- 'description': 'Red Hat, Inc. Virtio 1.0 GPU',
- 'devices': [
- {
- 'pci_id': '8086:29C0',
- 'pci_slot': '0000:18:0e.0',
- 'vm_pci_slot': 'pci_0000_18_0e_0'
- },
- {
- 'pci_id': '1AF4:1050',
- 'pci_slot': '0000:18:0e.2',
- 'vm_pci_slot': 'pci_0000_18_0e_2'
- },
- ],
- 'vendor': None,
- 'uses_system_critical_devices': True,
- 'available_to_host': True
- }
-]
-
-
-@pytest.mark.parametrize('isolated_gpu,keys,values', [
- (
- [],
- {
- 'Red Hat, Inc. Virtio 1.0 GPU [0000:16:0e.0]',
- 'Red Hat, Inc. Virtio 1.0 GPU [0000:17:0e.0]',
- },
- {
- '0000:16:0e.0',
- '0000:17:0e.0'
- }
- ),
- (
- ['0000:18:0e.0'],
- {
- 'Red Hat, Inc. Virtio 1.0 GPU [0000:16:0e.0]',
- 'Red Hat, Inc. Virtio 1.0 GPU [0000:17:0e.0]',
- 'Unknown \'0000:18:0e.0\' slot',
- },
- {
- '0000:16:0e.0',
- '0000:17:0e.0',
- '0000:18:0e.0'
- }
- ),
- (
- ['0000:19:0e.0'],
- {
- 'Red Hat, Inc. Virtio 1.0 GPU [0000:16:0e.0]',
- 'Red Hat, Inc. Virtio 1.0 GPU [0000:17:0e.0]',
- 'Unknown \'0000:19:0e.0\' slot',
- },
- {
- '0000:16:0e.0',
- '0000:17:0e.0',
- '0000:19:0e.0'
- }
- ),
-])
-def test_isolate_gpu_choices(isolated_gpu, keys, values):
- m = Middleware()
- m['system.advanced.config'] = lambda *args: {'isolated_gpu_pci_ids': isolated_gpu}
- with patch('middlewared.plugins.system_advanced.gpu.get_gpus', Mock(return_value=AVAILABLE_GPU)):
- assert set(SYSTEM_ADVANCED_SERVICE(m).get_gpu_pci_choices().keys()) == keys
- assert set(SYSTEM_ADVANCED_SERVICE(m).get_gpu_pci_choices().values()) == values
diff --git a/src/middlewared/middlewared/pytest/unit/plugins/test_smart.py b/src/middlewared/middlewared/pytest/unit/plugins/test_smart.py
index 27cebbae10e3f..911fc2f62358d 100644
--- a/src/middlewared/middlewared/pytest/unit/plugins/test_smart.py
+++ b/src/middlewared/middlewared/pytest/unit/plugins/test_smart.py
@@ -46,24 +46,24 @@ def test__parse_smart_selftest_results__ataprint__1():
}
}
assert parse_smart_selftest_results(data) == [
- AtaSelfTest(
- num=0,
- description="Short offline",
- status="SUCCESS",
- status_verbose="Completed without error",
- remaining=0.0,
- lifetime=16590,
- lba_of_first_error=None
- ),
- AtaSelfTest(
- num=1,
- description="Short offline",
- status="SUCCESS",
- status_verbose="Completed without error",
- remaining=0.0,
- lifetime=16589,
- lba_of_first_error=None
- )
+ {
+ "num": 0,
+ "description": "Short offline",
+ "status": "SUCCESS",
+ "status_verbose": "Completed without error",
+ "remaining": 0.0,
+ "lifetime": 16590,
+ "lba_of_first_error": None
+ },
+ {
+ "num": 1,
+ "description": "Short offline",
+ "status": "SUCCESS",
+ "status_verbose": "Completed without error",
+ "remaining": 0.0,
+ "lifetime": 16589,
+ "lba_of_first_error": None
+ }
]
@@ -93,15 +93,15 @@ def test__parse_smart_selftest_results__ataprint__2():
}
}
assert parse_smart_selftest_results(data) == [
- AtaSelfTest(
- num=0,
- description="Offline",
- status="RUNNING",
- status_verbose="Self-test routine in progress",
- remaining=1.0,
- lifetime=0,
- lba_of_first_error=None
- )
+ {
+ "num": 0,
+ "description": "Offline",
+ "status": "RUNNING",
+ "status_verbose": "Self-test routine in progress",
+ "remaining": 1.0,
+ "lifetime": 0,
+ "lba_of_first_error": None
+ }
]
@@ -123,18 +123,18 @@ def test__parse_smart_selftest_results__nvmeprint__1():
"error_count_outdated": 0
}
}) == [
- NvmeSelfTest(
- num=0,
- description="Short",
- status="SUCCESS",
- status_verbose="Completed without error",
- power_on_hours=18636,
- failing_lba=None,
- nsid=None,
- seg=None,
- sct=0x0,
- code=0x0
- ),
+ {
+ "num": 0,
+ "description": "Short",
+ "status": "SUCCESS",
+ "status_verbose": "Completed without error",
+ "power_on_hours": 18636,
+ "failing_lba": None,
+ "nsid": None,
+ "seg": None,
+ "sct": 0x0,
+ "code": 0x0
+ },
]
@@ -152,15 +152,15 @@ def test__parse_smart_selftest_results__scsiprint__1():
}
}
}) == [
- ScsiSelfTest(
- num=0,
- description="Background short",
- status="FAILED",
- status_verbose="Completed, segment failed",
- segment_number=None,
- lifetime=3943,
- lba_of_first_error=None
- ),
+ {
+ "num": 0,
+ "description": "Background short",
+ "status": "FAILED",
+ "status_verbose": "Completed, segment failed",
+ "segment_number": None,
+ "lifetime": 3943,
+ "lba_of_first_error": None
+ }
]
diff --git a/src/middlewared/middlewared/pytest/unit/plugins/test_snapshot.py b/src/middlewared/middlewared/pytest/unit/plugins/test_snapshot.py
deleted file mode 100644
index 9fcf56e98fba8..0000000000000
--- a/src/middlewared/middlewared/pytest/unit/plugins/test_snapshot.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from unittest.mock import Mock
-
-import pytest
-
-from middlewared.plugins.snapshot import PeriodicSnapshotTaskService
-
-
-@pytest.mark.parametrize("task,count", [
- ({"schedule": {"hour": "0", "minute": "0", "dom": "*", "month": "*", "dow": "*"},
- "lifetime_value": 1, "lifetime_unit": "MONTH"}, 30),
- ({"schedule": {"hour": "0", "minute": "0", "dom": "*", "month": "*", "dow": "*"},
- "lifetime_value": 2, "lifetime_unit": "MONTH"}, 60),
- ({"schedule": {"hour": "0", "minute": "0", "dom": "*", "month": "*", "dow": "*"},
- "lifetime_value": 5, "lifetime_unit": "YEAR"}, 365 * 5),
- ({"schedule": {"hour": "*", "minute": "0", "dom": "*", "month": "*", "dow": "*"},
- "lifetime_value": 1, "lifetime_unit": "MONTH"}, 720),
- ({"schedule": {"hour": "*", "minute": "0", "dom": "*", "month": "*", "dow": "*", "begin": "16:00", "end": "18:59"},
- "lifetime_value": 1, "lifetime_unit": "MONTH"}, 90),
-])
-def test__snapshot_task__foreseen_count(task, count):
- assert PeriodicSnapshotTaskService(Mock()).foreseen_count(task) == count
diff --git a/src/middlewared/middlewared/rclone/remote/storjix.py b/src/middlewared/middlewared/rclone/remote/storjix.py
index 32bc3487f0aad..5ea355f8bd8bc 100644
--- a/src/middlewared/middlewared/rclone/remote/storjix.py
+++ b/src/middlewared/middlewared/rclone/remote/storjix.py
@@ -13,6 +13,10 @@
from middlewared.utils.network import INTERNET_TIMEOUT
+class StorjIxError(CallError):
+ pass
+
+
class StorjIxRcloneRemote(BaseRcloneRemote):
name = "STORJ_IX"
title = "Storj iX"
@@ -41,10 +45,19 @@ def create_bucket_sync():
aws_access_key_id=credentials["attributes"]["access_key_id"],
aws_secret_access_key=credentials["attributes"]["secret_access_key"],
)
+ # s3 bucket naming rules: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
try:
s3_client.create_bucket(Bucket=name)
except s3_client.exceptions.BucketAlreadyExists as e:
raise CallError(str(e), errno=errno.EEXIST)
+ except botocore.exceptions.ParamValidationError as e:
+ raise StorjIxError("The bucket name can only contain lowercase letters, numbers, and hyphens.",
+ errno.EINVAL, str(e))
+ except botocore.exceptions.ClientError as e:
+ if "InvalidBucketName" in e.args[0]:
+ raise StorjIxError("The bucket name must be between 3-63 characters in length and cannot contain "
+ "uppercase.", errno.EINVAL, str(e))
+ raise
return await self.middleware.run_in_thread(create_bucket_sync)
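
The new handlers translate boto's ParamValidationError/InvalidBucketName into user-facing messages. A hedged sketch of the naming rule those messages describe (3-63 characters; lowercase letters, digits, and hyphens; assumed here to also require starting and ending with a letter or digit, per the linked S3 rules):

    import re

    BUCKET_NAME_RE = re.compile(r'^[a-z0-9][a-z0-9-]{1,61}[a-z0-9]$')

    def is_valid_bucket_name(name: str) -> bool:
        # Pre-validate client-side so most invalid names never reach S3.
        return BUCKET_NAME_RE.match(name) is not None
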
diff --git a/src/middlewared/middlewared/schema/processor.py b/src/middlewared/middlewared/schema/processor.py
index 994607e926182..4792b691a4b06 100644
--- a/src/middlewared/middlewared/schema/processor.py
+++ b/src/middlewared/middlewared/schema/processor.py
@@ -6,7 +6,6 @@
from middlewared.schema import Attribute
from middlewared.service_exception import CallError, ValidationErrors
-from middlewared.settings import conf
from .exceptions import Error
from .utils import NOT_PROVIDED
@@ -47,14 +46,10 @@ def returns_internal(f):
if asyncio.iscoroutinefunction(f):
async def nf(*args, **kwargs):
res = await f(*args, **kwargs)
- if conf.debug_mode:
- validate_return_type(f, res, nf.returns)
return res
else:
def nf(*args, **kwargs):
res = f(*args, **kwargs)
- if conf.debug_mode:
- validate_return_type(f, res, nf.returns)
return res
from middlewared.utils.type import copy_function_metadata
diff --git a/src/middlewared/middlewared/service/core_service.py b/src/middlewared/middlewared/service/core_service.py
index 4f7825fd8f29d..77b9fa1060617 100644
--- a/src/middlewared/middlewared/service/core_service.py
+++ b/src/middlewared/middlewared/service/core_service.py
@@ -25,9 +25,8 @@
from middlewared.common.environ import environ_update
from middlewared.job import Job
from middlewared.pipe import Pipes
-from middlewared.schema import accepts, Any, Bool, Datetime, Dict, Int, List, returns, Str
+from middlewared.schema import accepts, Any, Bool, Datetime, Dict, Int, List, Str
from middlewared.service_exception import CallError, ValidationErrors
-from middlewared.settings import conf
from middlewared.utils import BOOTREADY, filter_list, MIDDLEWARE_RUN_DIR
from middlewared.utils.debug import get_frame_details, get_threads_stacks
from middlewared.utils.privilege import credential_has_full_admin, credential_is_limited_to_own_jobs
@@ -66,18 +65,6 @@ async def resize_shell(self, id_, cols, rows):
shell.resize(cols, rows)
- @accepts(Bool('debug_mode'))
- async def set_debug_mode(self, debug_mode):
- """
- Set `debug_mode` for middleware.
- """
- conf.debug_mode = debug_mode
-
- @accepts()
- @returns(Bool())
- async def debug_mode_enabled(self):
- return conf.debug_mode
-
@private
def get_tasks(self):
for task in asyncio.all_tasks(loop=self.middleware.loop):
diff --git a/src/middlewared/middlewared/settings.py b/src/middlewared/middlewared/settings.py
deleted file mode 100644
index 013444594357d..0000000000000
--- a/src/middlewared/middlewared/settings.py
+++ /dev/null
@@ -1,6 +0,0 @@
-class Configuration:
- def __init__(self, debug_mode=True):
- self.debug_mode = debug_mode
-
-
-conf = Configuration()
diff --git a/src/middlewared/middlewared/test/integration/assets/product.py b/src/middlewared/middlewared/test/integration/assets/product.py
index cf28aaae16f49..9fc51a04ef9d2 100644
--- a/src/middlewared/middlewared/test/integration/assets/product.py
+++ b/src/middlewared/middlewared/test/integration/assets/product.py
@@ -1,6 +1,6 @@
import contextlib
-from middlewared.test.integration.utils import call, mock
+from middlewared.test.integration.utils import mock
@contextlib.contextmanager
diff --git a/src/middlewared/middlewared/test/integration/utils/docker.py b/src/middlewared/middlewared/test/integration/utils/docker.py
index 01edb9c1474b2..d41f61f2d52d6 100644
--- a/src/middlewared/middlewared/test/integration/utils/docker.py
+++ b/src/middlewared/middlewared/test/integration/utils/docker.py
@@ -3,6 +3,7 @@
IX_APPS_DIR_NAME = '.ix-apps'
IX_APPS_MOUNT_PATH: str = os.path.join('/mnt', IX_APPS_DIR_NAME)
+IX_APPS_CATALOG_PATH: str = os.path.join(IX_APPS_MOUNT_PATH, 'truenas_catalog')
DOCKER_DATASET_PROPS = {
'aclmode': 'discard',
diff --git a/src/middlewared/middlewared/test/integration/utils/smb.py b/src/middlewared/middlewared/test/integration/utils/smb.py
new file mode 100644
index 0000000000000..a83ac2931329d
--- /dev/null
+++ b/src/middlewared/middlewared/test/integration/utils/smb.py
@@ -0,0 +1,42 @@
+import contextlib
+import os
+import sys
+
+try:
+ apifolder = os.getcwd()
+ sys.path.append(apifolder)
+ from protocols.smb_proto import SMB, security
+except ImportError:
+ SMB = None
+ security = None
+
+from .client import truenas_server
+
+__all__ = ["smb_connection"]
+
+
+@contextlib.contextmanager
+def smb_connection(
+ host=None,
+ share=None,
+ encryption='DEFAULT',
+ username=None,
+ domain=None,
+ password=None,
+ smb1=False
+):
+ s = SMB()
+ s.connect(
+ host=host or truenas_server.ip,
+ share=share,
+ encryption=encryption,
+ username=username,
+ domain=domain,
+ password=password,
+ smb1=smb1
+ )
+
+ try:
+ yield s
+ finally:
+ s.disconnect()
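
A usage sketch for the new helper; the share name and credentials are hypothetical. The context manager guarantees `disconnect()` runs even when the body raises, which is what the test_427_smb_acl.py hunk below relies on:

    from middlewared.test.integration.utils.smb import smb_connection

    with smb_connection(share='TESTSHARE', username='smbuser', password='secret') as c:
        c.mkdir('foo')  # connection is torn down automatically on exit
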
diff --git a/src/middlewared/middlewared/utils/__init__.py b/src/middlewared/middlewared/utils/__init__.py
index aa221e9e5ca5b..bf50437379797 100644
--- a/src/middlewared/middlewared/utils/__init__.py
+++ b/src/middlewared/middlewared/utils/__init__.py
@@ -8,6 +8,7 @@
import time
import json
from collections import namedtuple
+from dataclasses import dataclass
from datetime import datetime
from middlewared.service_exception import MatchNotFound
@@ -15,12 +16,28 @@
from .prctl import die_with_parent
from .threading import io_thread_pool_executor
+
+# Define product string constants
+@dataclass(slots=True, frozen=True)
+class ProductTypes:
+ SCALE: str = 'SCALE'
+ SCALE_ENTERPRISE: str = 'SCALE_ENTERPRISE'
+
+
+@dataclass(slots=True, frozen=True)
+class ProductNames:
+ PRODUCT_NAME: str = 'TrueNAS'
+
+
+ProductType = ProductTypes()
+ProductName = ProductNames()
+
MID_PID = None
MIDDLEWARE_RUN_DIR = '/var/run/middleware'
BOOTREADY = f'{MIDDLEWARE_RUN_DIR}/.bootready'
MANIFEST_FILE = '/data/manifest.json'
-BRAND = 'TrueNAS'
-PRODUCT = 'SCALE'
+BRAND = ProductName.PRODUCT_NAME
+PRODUCT = ProductType.SCALE
BRAND_PRODUCT = f'{BRAND}-{PRODUCT}'
NULLS_FIRST = 'nulls_first:'
NULLS_LAST = 'nulls_last:'
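
The frozen, slotted dataclass instances act as namespaced string constants, so existing comparisons against plain strings keep working while call sites gain a single importable name. A small sketch:

    from middlewared.utils import ProductType

    def is_enterprise(product_type: str) -> bool:
        # ProductType.SCALE_ENTERPRISE is still the plain string
        # 'SCALE_ENTERPRISE', so this matches values returned by
        # system.product_type.
        return product_type == ProductType.SCALE_ENTERPRISE
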
diff --git a/src/middlewared/middlewared/utils/filesystem/copy.py b/src/middlewared/middlewared/utils/filesystem/copy.py
index d0b201dca93a4..64595f79ca169 100644
--- a/src/middlewared/middlewared/utils/filesystem/copy.py
+++ b/src/middlewared/middlewared/utils/filesystem/copy.py
@@ -15,6 +15,7 @@
fchown,
fstat,
getxattr,
+ listxattr,
lseek,
makedev,
mkdir,
@@ -604,6 +605,29 @@ def copytree(
try:
with DirectoryIterator(src, request_mask=int(dir_request_mask), as_dict=False) as d_iter:
_copytree_impl(d_iter, dst, dst_fd, CLONETREE_ROOT_DEPTH, config, fstat(dst_fd), stats)
+
+ # Ensure that the root-level directory also gets its metadata copied
+ try:
+ xattrs = listxattr(d_iter.dir_fd)
+ if config.flags.value & CopyFlags.PERMISSIONS.value:
+ copy_permissions(d_iter.dir_fd, dst_fd, xattrs, d_iter.stat.stx_mode)
+
+ if config.flags.value & CopyFlags.XATTRS.value:
+ copy_xattrs(d_iter.dir_fd, dst_fd, xattrs)
+
+ if config.flags.value & CopyFlags.OWNER.value:
+ fchown(dst_fd, d_iter.stat.stx_uid, d_iter.stat.stx_gid)
+
+ if config.flags.value & CopyFlags.TIMESTAMPS.value:
+ ns_ts = (
+ timespec_convert_int(d_iter.stat.stx_atime),
+ timespec_convert_int(d_iter.stat.stx_mtime)
+ )
+ utime(dst_fd, ns=ns_ts)
+ except Exception:
+ if config.raise_error:
+ raise
+
finally:
close(dst_fd)
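
The added block re-applies permissions, xattrs, ownership, and timestamps to the destination root itself, which the recursive `_copytree_impl` pass only did for children. A usage sketch matching the netdata_configure.py hunk above (the default `CopyTreeConfig` is assumed to enable all of those copy flags):

    from middlewared.utils.filesystem.copy import copytree, CopyTreeConfig

    # Copy the tree and, with this change, the root directory's own
    # metadata as well.
    copytree('/var/lib/netdata', '/tmp/netdata-state', config=CopyTreeConfig())
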
diff --git a/tests/api2/test_070_alertservice.py b/tests/api2/test_070_alertservice.py
index 867c1618065fd..9540aaf651723 100644
--- a/tests/api2/test_070_alertservice.py
+++ b/tests/api2/test_070_alertservice.py
@@ -1,11 +1,8 @@
-import pytest
-
from middlewared.test.integration.utils import call
def test_alert_gets():
call("alertservice.query")
- call("alertservice.list_types")
def test_alertservice():
diff --git a/tests/api2/test_261_iscsi_cmd.py b/tests/api2/test_261_iscsi_cmd.py
index 160a362d966e4..272ab625519a2 100644
--- a/tests/api2/test_261_iscsi_cmd.py
+++ b/tests/api2/test_261_iscsi_cmd.py
@@ -1782,7 +1782,7 @@ def _ha_reboot_master(delay=900):
orig_master_node = _get_node()
new_master_node = other_node(orig_master_node)
- call('system.reboot')
+ call('system.reboot', 'iSCSI test')
# First we'll loop until the node is no longer the orig_node
new_master = False
diff --git a/tests/api2/test_262_iscsi_alua.py b/tests/api2/test_262_iscsi_alua.py
index e6ffe8cac126d..55ed91484e584 100644
--- a/tests/api2/test_262_iscsi_alua.py
+++ b/tests/api2/test_262_iscsi_alua.py
@@ -582,7 +582,7 @@ def test_boot_complex_alua_config(self, fix_write_patterns, fix_get_domain, fix_
if newnode != fix_orig_active_node:
if self.VERBOSE:
print(f'Restoring {fix_orig_active_node} as MASTER')
- call('system.reboot')
+ call('system.reboot', 'iSCSI ALUA test')
newnode2 = self.wait_for_new_master(newnode)
assert newnode2 == fix_orig_active_node
self.wait_for_backup()
diff --git a/tests/api2/test_344_acl_templates.py b/tests/api2/test_344_acl_templates.py
index a91f1159a0271..798c8937bdaba 100644
--- a/tests/api2/test_344_acl_templates.py
+++ b/tests/api2/test_344_acl_templates.py
@@ -94,47 +94,6 @@ def test_03_create_new_template(request, acltype):
assert results.status_code == 200, results.text
-def test_04_legacy_check_default_acl_choices(request):
- """
- Verify that our new templates appear as choices for "default" ACLs.
- """
- depends(request, ["NEW_ACLTEMPLATES_CREATED"], scope="session")
-
- results = GET(
- '/filesystem/acltemplate', payload={
- 'query-filters': [['builtin', '=', False]],
- }
- )
- assert results.status_code == 200, results.text
-
- names = [x['name'] for x in results.json()]
-
- results = POST('/filesystem/default_acl_choices')
- assert results.status_code == 200, results.text
- acl_choices = results.json()
-
- for name in names:
- assert name in acl_choices, results.text
-
-
-@pytest.mark.parametrize('acltype', ['NFS4', 'POSIX'])
-def test_05_legacy_check_default_acl_choices_by_path(request, acltype):
- """
- Verify that our new templates appear as choices for "default" ACLs
- given a path.
- """
- depends(request, ["NEW_ACLTEMPLATES_CREATED"], scope="session")
- inverse = 'POSIX' if acltype == 'NFS4' else 'NFS4'
-
- path = f'/mnt/{pool_name}/acltemplate_{"posix" if acltype == "POSIX" else "nfsv4"}'
- results = POST('/filesystem/default_acl_choices', payload=path)
- assert results.status_code == 200, results.text
-
- choices = results.json()
- assert f'{acltype}_TEST' in choices, results.text
- assert f'{inverse}_TEST' not in choices, results.text
-
-
@pytest.mark.dependency(name="NEW_ACLTEMPLATES_UPDATED")
@pytest.mark.parametrize('acltype', ['NFS4', 'POSIX'])
def test_09_update_new_template(request, acltype):
diff --git a/tests/api2/test_427_smb_acl.py b/tests/api2/test_427_smb_acl.py
index 0e6e82e2ee238..becbda5957528 100644
--- a/tests/api2/test_427_smb_acl.py
+++ b/tests/api2/test_427_smb_acl.py
@@ -18,8 +18,8 @@
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call
from middlewared.test.integration.utils.client import truenas_server
+from middlewared.test.integration.utils.smb import security, smb_connection
from middlewared.test.integration.utils.unittest import RegexString
-from protocols import SMB
from pytest_dependency import depends
from time import sleep
from utils import create_dataset
@@ -252,15 +252,20 @@ def test_007_test_disable_autoinherit(request):
path = f'/mnt/{pool_name}/{ds}'
with create_dataset(f'{pool_name}/{ds}', {'share_type': 'SMB'}):
with smb_share(path, 'NFS4_INHERIT'):
- c = SMB()
- c.connect(share='NFS4_INHERIT', username=SMB_USER, password=SMB_PWD, smb1=False)
- c.mkdir('foo')
- sd = c.get_sd('foo')
- assert 'SEC_DESC_DACL_PROTECTED' not in sd['control']['parsed'], str(sd)
- c.inherit_acl('foo', 'COPY')
- sd = c.get_sd('foo')
- assert 'SEC_DESC_DACL_PROTECTED' in sd['control']['parsed'], str(sd)
- c.disconnect()
+ with smb_connection(
+ share='NFS4_INHERIT',
+ username=SMB_USER,
+ password=SMB_PWD
+ ) as c:
+ c.mkdir('foo')
+ fh = c.create_file('foo', 'r')
+ sd = c.get_sd(fh, security.SECINFO_DACL)
+ c.close(fh)
+ assert sd.type & security.SEC_DESC_DACL_PROTECTED == 0, sd.as_sddl()
+ c.inherit_acl('foo', 'COPY')
+ fh = c.create_file('foo', 'r')
+ sd = c.get_sd(fh, security.SECINFO_DACL)
+ assert sd.type & security.SEC_DESC_DACL_PROTECTED, sd.as_sddl()
def test_008_test_prevent_smb_dataset_update(request):
diff --git a/tests/api2/test_catalogs.py b/tests/api2/test_catalogs.py
new file mode 100644
index 0000000000000..4c0e993daf7c9
--- /dev/null
+++ b/tests/api2/test_catalogs.py
@@ -0,0 +1,86 @@
+import os.path
+
+import pytest
+
+from middlewared.test.integration.assets.pool import another_pool
+from middlewared.test.integration.utils import call
+from middlewared.test.integration.utils.docker import IX_APPS_CATALOG_PATH
+
+
+@pytest.fixture(scope='module')
+def docker_pool(request):
+ with another_pool() as pool:
+ yield pool['name']
+
+
+@pytest.mark.dependency(name='unconfigure_apps')
+def test_unconfigure_apps():
+ config = call('docker.update', {'pool': None}, job=True)
+ assert config['pool'] is None, config
+
+
+@pytest.mark.dependency(depends=['unconfigure_apps'])
+def test_catalog_sync():
+ call('catalog.sync', job=True)
+ assert call('catalog.synced') is True
+
+
+@pytest.mark.dependency(depends=['unconfigure_apps'])
+def test_catalog_cloned_location():
+ config = call('catalog.config')
+ assert config['location'] == '/var/run/middleware/ix-apps/catalogs', config
+
+
+@pytest.mark.dependency(depends=['unconfigure_apps'])
+def test_apps_are_being_reported():
+ assert call('app.available', [], {'count': True}) != 0
+
+
+@pytest.mark.dependency(name='docker_setup')
+def test_docker_setup(docker_pool):
+ config = call('docker.update', {'pool': docker_pool}, job=True)
+ assert config['pool'] == docker_pool, config
+
+
+@pytest.mark.dependency(depends=['docker_setup'])
+def test_catalog_synced_properly():
+ assert call('catalog.synced') is True
+
+
+@pytest.mark.dependency(depends=['docker_setup'])
+def test_catalog_sync_location():
+ assert call('catalog.config')['location'] == IX_APPS_CATALOG_PATH
+
+
+@pytest.mark.dependency(depends=['docker_setup'])
+def test_catalog_location_existence():
+ docker_config = call('docker.config')
+ assert docker_config['pool'] is not None
+
+ assert call('filesystem.statfs', IX_APPS_CATALOG_PATH)['source'] == os.path.join(
+ docker_config['dataset'], 'truenas_catalog'
+ )
+
+
+@pytest.mark.dependency(depends=['docker_setup'])
+def test_apps_are_being_reported_after_docker_setup():
+ assert call('app.available', [], {'count': True}) != 0
+
+
+@pytest.mark.dependency(depends=['docker_setup'])
+def test_categories_are_being_reported():
+ assert len(call('app.categories')) != 0
+
+
+@pytest.mark.dependency(depends=['docker_setup'])
+def test_app_version_details():
+ app_details = call('catalog.get_app_details', 'plex', {'train': 'stable'})
+ assert app_details['name'] == 'plex', app_details
+
+ assert len(app_details['versions']) != 0, app_details
+
+
+@pytest.mark.dependency(depends=['docker_setup'])
+def test_unconfigure_apps_after_setup():
+ config = call('docker.update', {'pool': None}, job=True)
+ assert config['pool'] is None, config
diff --git a/tests/api2/test_cloud_sync_storj.py b/tests/api2/test_cloud_sync_storj.py
index ae4fac02158c0..5c45be4b264a7 100644
--- a/tests/api2/test_cloud_sync_storj.py
+++ b/tests/api2/test_cloud_sync_storj.py
@@ -1,27 +1,14 @@
-import os
-import sys
-
import pytest
+from config import (
+ STORJ_IX_AWS_ACCESS_KEY_ID,
+ STORJ_IX_AWS_SECRET_ACCESS_KEY,
+ STORJ_IX_BUCKET,
+)
from middlewared.test.integration.utils import call, ssh
from middlewared.test.integration.assets.cloud_sync import credential, task, run_task
from middlewared.test.integration.assets.pool import dataset
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-pytestmark = pytest.mark.skip(reason='See IT ticket IT-9829')
-try:
- from config import (
- STORJ_IX_AWS_ACCESS_KEY_ID,
- STORJ_IX_AWS_SECRET_ACCESS_KEY,
- STORJ_IX_BUCKET,
- )
-except ImportError:
- pytestmark = pytest.mark.skip(reason='Storj credential are missing in config.py')
- STORJ_IX_AWS_ACCESS_KEY_ID = None
- STORJ_IX_AWS_SECRET_ACCESS_KEY = None
- STORJ_IX_BUCKET = None
CREDENTIAL = {
"provider": "STORJ_IX",
@@ -34,6 +21,7 @@
"bucket": STORJ_IX_BUCKET,
"folder": "",
}
+FILENAME = "a"
def test_storj_verify():
@@ -58,16 +46,31 @@ def test_storj_list_buckets(storj_credential):
assert any(item["Name"] == STORJ_IX_BUCKET for item in call("cloudsync.list_buckets", storj_credential["id"]))
-def test_storj_list_directory(storj_credential):
+@pytest.fixture(scope="module")
+def storj_sync(storj_credential):
+ """Reset the remote bucket to only contain a single empty file."""
+ with dataset("test_storj_sync") as ds:
+ ssh(f"touch /mnt/{ds}/{FILENAME}")
+ with task({
+ "direction": "PUSH",
+ "transfer_mode": "SYNC",
+ "path": f"/mnt/{ds}",
+ "credentials": storj_credential["id"],
+ "attributes": TASK_ATTRIBUTES,
+ }) as t:
+ run_task(t)
+
+
+def test_storj_list_directory(storj_credential, storj_sync):
result = call("cloudsync.list_directory", {
"credentials": storj_credential["id"],
"attributes": TASK_ATTRIBUTES,
})
assert len(result) == 1
- assert result[0]["Name"] == "a"
+ assert result[0]["Name"] == FILENAME
-def test_storj_sync(storj_credential):
+def test_storj_pull(storj_credential, storj_sync):
with dataset("test_storj_sync") as ds:
with task({
"direction": "PULL",
@@ -78,4 +81,4 @@ def test_storj_sync(storj_credential):
}) as t:
run_task(t)
- assert ssh(f"ls /mnt/{ds}") == "a\n"
+ assert ssh(f"ls /mnt/{ds}") == FILENAME + "\n"
diff --git a/tests/api2/test_smb_null_empty_dacl.py b/tests/api2/test_smb_null_empty_dacl.py
new file mode 100644
index 0000000000000..130e87af9f1ce
--- /dev/null
+++ b/tests/api2/test_smb_null_empty_dacl.py
@@ -0,0 +1,135 @@
+import json
+import os
+import pytest
+
+from middlewared.test.integration.assets.smb import smb_share
+from middlewared.test.integration.assets.pool import dataset
+from middlewared.test.integration.utils import call, ssh
+from middlewared.test.integration.utils.smb import security, smb_connection
+from samba import ntstatus, NTSTATUSError
+
+
+ADV_PERMS_FIELDS = [
+ 'READ_DATA', 'WRITE_DATA', 'APPEND_DATA',
+ 'READ_NAMED_ATTRS', 'WRITE_NAMED_ATTRS',
+ 'EXECUTE',
+ 'DELETE_CHILD', 'DELETE',
+ 'READ_ATTRIBUTES', 'WRITE_ATTRIBUTES',
+ 'READ_ACL', 'WRITE_ACL',
+ 'WRITE_OWNER',
+ 'SYNCHRONIZE'
+]
+
+NULL_DACL_PERMS = {'BASIC': 'FULL_CONTROL'}
+EMPTY_DACL_PERMS = {perm: False for perm in ADV_PERMS_FIELDS}
+
+
+@pytest.fixture(scope='function')
+def share():
+ with dataset('null_dacl_test', {'share_type': 'SMB'}) as ds:
+ with smb_share(f'/mnt/{ds}', 'DACL_TEST_SHARE') as s:
+ yield {'ds': ds, 'share': s}
+
+
+def set_special_acl(path, special_acl_type):
+ match special_acl_type:
+ case 'NULL_DACL':
+ permset = NULL_DACL_PERMS
+ case 'EMPTY_DACL':
+ permset = EMPTY_DACL_PERMS
+ case _:
+ raise TypeError(f'[EDOOFUS]: {special_acl_type} unexpected special ACL type')
+
+ payload = json.dumps({'acl': [{
+ 'tag': 'everyone@',
+ 'id': -1,
+ 'type': 'ALLOW',
+ 'perms': permset,
+ 'flags': {'BASIC': 'NOINHERIT'},
+ }]})
+ ssh(f'touch {path}')
+
+ # Use SSH to write to avoid middleware ACL normalization and validation
+ # that prevents writing these specific ACLs.
+ ssh(f"nfs4xdr_setfacl -j '{payload}' {path}")
+
+
+def test_null_dacl_set(unprivileged_user_fixture, share):
+ """ verify that setting NULL DACL results in expected ZFS ACL """
+ with smb_connection(
+ share=share['share']['name'],
+ username=unprivileged_user_fixture.username,
+ password=unprivileged_user_fixture.password,
+ ) as c:
+ fh = c.create_file('test_null_dacl', 'w')
+ current_sd = c.get_sd(fh, security.SECINFO_OWNER | security.SECINFO_GROUP)
+ current_sd.dacl = None
+ c.set_sd(fh, current_sd, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL)
+
+ new_sd = c.get_sd(fh, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL)
+ assert new_sd.dacl is None
+
+ theacl = call('filesystem.getacl', os.path.join(share['share']['path'], 'test_null_dacl'))
+ assert len(theacl['acl']) == 1
+
+ assert theacl['acl'][0]['perms'] == NULL_DACL_PERMS
+ assert theacl['acl'][0]['type'] == 'ALLOW'
+ assert theacl['acl'][0]['tag'] == 'everyone@'
+
+
+def test_null_dacl_functional(unprivileged_user_fixture, share):
+ """ verify that NULL DACL grants write privileges """
+ testfile = os.path.join(share['share']['path'], 'test_null_dacl_write')
+ set_special_acl(testfile, 'NULL_DACL')
+ data = b'canary'
+
+ with smb_connection(
+ share=share['share']['name'],
+ username=unprivileged_user_fixture.username,
+ password=unprivileged_user_fixture.password,
+ ) as c:
+ fh = c.create_file('test_null_dacl_write', 'w')
+ current_sd = c.get_sd(fh, security.SECINFO_OWNER | security.SECINFO_GROUP)
+ assert current_sd.dacl is None
+
+ c.write(fh, data)
+ assert c.read(fh, 0, cnt=len(data)) == data
+
+
+def test_empty_dacl_set(unprivileged_user_fixture, share):
+ """ verify that setting empty DACL results in expected ZFS ACL """
+ with smb_connection(
+ share=share['share']['name'],
+ username=unprivileged_user_fixture.username,
+ password=unprivileged_user_fixture.password,
+ ) as c:
+ fh = c.create_file('test_empty_dacl', 'w')
+ current_sd = c.get_sd(fh, security.SECINFO_OWNER | security.SECINFO_GROUP)
+ current_sd.dacl = security.acl()
+ c.set_sd(fh, current_sd, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL)
+
+ new_sd = c.get_sd(fh, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL)
+ assert new_sd.dacl.num_aces == 0
+
+ theacl = call('filesystem.getacl', os.path.join(share['share']['path'], 'test_empty_dacl'))
+ assert len(theacl['acl']) == 1
+
+ assert theacl['acl'][0]['perms'] == EMPTY_DACL_PERMS
+ assert theacl['acl'][0]['type'] == 'ALLOW'
+ assert theacl['acl'][0]['tag'] == 'everyone@'
+
+
+def test_empty_dacl_functional(unprivileged_user_fixture, share):
+ testfile = os.path.join(share['share']['path'], 'test_empty_dacl_write')
+ set_special_acl(testfile, 'EMPTY_DACL')
+
+ with smb_connection(
+ share=share['share']['name'],
+ username=unprivileged_user_fixture.username,
+ password=unprivileged_user_fixture.password,
+ ) as c:
+ # File has empty ACL and is not owned by this user
+ with pytest.raises(NTSTATUSError) as nt_err:
+ c.create_file('test_empty_dacl_write', 'w')
+
+ assert nt_err.value.args[0] == ntstatus.NT_STATUS_ACCESS_DENIED
diff --git a/tests/api2/test_system_dataset.py b/tests/api2/test_system_dataset.py
index 46c49778fefb7..d635c6ab092bf 100644
--- a/tests/api2/test_system_dataset.py
+++ b/tests/api2/test_system_dataset.py
@@ -77,3 +77,11 @@ def test_system_dataset_mountpoints():
assert ds_stats["uid"] == system_dataset_spec["chown_config"]["uid"]
assert ds_stats["gid"] == system_dataset_spec["chown_config"]["gid"]
assert ds_stats["mode"] & 0o777 == system_dataset_spec["chown_config"]["mode"]
+
+
+def test_netdata_post_mount_action():
+ # We rely on this to make sure system dataset post-mount actions are working as intended
+ ds_stats = call("filesystem.stat", "/var/db/system/netdata/ix_state")
+ assert ds_stats["uid"] == 999, ds_stats
+ assert ds_stats["gid"] == 997, ds_stats
+ assert ds_stats["mode"] & 0o777 == 0o755, ds_stats
diff --git a/tests/api2/test_system_lifetime.py b/tests/api2/test_system_lifetime.py
new file mode 100644
index 0000000000000..a1bd282279435
--- /dev/null
+++ b/tests/api2/test_system_lifetime.py
@@ -0,0 +1,38 @@
+import time
+
+import pytest
+
+from middlewared.test.integration.utils import call
+
+from auto_config import ha
+
+
+@pytest.mark.skipif(
+ ha,
+ reason="Cannot be tested on a HA system since rebooting this node will just fail over to another node",
+)
+def test_system_reboot():
+ boot_id = call("system.boot_id")
+
+ call("system.reboot", "Integration test")
+
+ for i in range(180):
+ try:
+ new_boot_id = call("system.boot_id")
+ except Exception:
+ pass
+ else:
+ if new_boot_id != boot_id:
+ break
+
+ time.sleep(1)
+ else:
+ assert False, "System did not reboot"
+
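+ # The most recent middleware REBOOT audit event should carry the
+ # reason passed to system.reboot above.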
+ audit = call("audit.query", {
+ "services": ["MIDDLEWARE"],
+ "query-filters": [
+ ["event", "=", "REBOOT"],
+ ],
+ })
+ assert audit[-1]["event_data"] == {"reason": "Integration test"}
diff --git a/tests/protocols/smb_proto.py b/tests/protocols/smb_proto.py
index 0823486a93178..7fe6fdd1566cf 100644
--- a/tests/protocols/smb_proto.py
+++ b/tests/protocols/smb_proto.py
@@ -324,53 +324,11 @@ def set_quota(self, **kwargs):
quotaout = smbcquotas.stdout.decode().splitlines()
return self._parse_quota(quotaout)
- def get_sd(self, path):
- def get_offset_by_key(data, key):
- for idx, entry in enumerate(data):
- if entry.startswith(key):
- return data[idx:]
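+ # These thin wrappers replace the smbcacls text-scraping removed here
+ # with direct security-descriptor calls on an already-open handle; idx
+ # is the key into self._open_files. Typical usage, as in the tests
+ # above:
+ #     fh = c.create_file('f', 'w')
+ #     sd = c.get_sd(fh, security.SECINFO_DACL)
+ #     c.set_sd(fh, sd, security.SECINFO_DACL)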
+ def set_sd(self, idx, secdesc, security_info):
+ self._connection.set_sd(self._open_files[idx]["fh"], secdesc, security_info)
- raise ValueError(f'Failed to parse ACL: {data}')
-
- cmd = [
- "smbcacls", f"//{self._host}/{self._share}",
- "-U", f"{self._username}%{self._password}",
- "--numeric"
- ]
-
- if self._smb1:
- cmd.extend(["-m", "NT1"])
-
- cmd.append(path)
-
- cl = subprocess.run(cmd, capture_output=True)
- if cl.returncode != 0:
- raise RuntimeError(cl.stdout.decode() or cl.stderr.decode())
-
- output = get_offset_by_key(cl.stdout.decode().splitlines(), 'REVISION')
- revision = int(output[0].split(':')[1])
- control = {"raw": output[1].split(':')[1]}
- control['parsed'] = [x.name for x in ACLControl if int(control['raw'], 16) & x]
-
- sd = {
- "revision": revision,
- "control": control,
- "owner": output[2].split(':')[1],
- "group": output[3].split(':')[1],
- "acl": []
- }
- for l in get_offset_by_key(output, 'ACL'):
- entry, flags, access_mask = l.split("/")
- prefix, trustee, ace_type = entry.split(":")
-
- sd['acl'].append({
- "trustee": trustee,
- "type": int(ace_type),
- "access_mask": int(access_mask, 16),
- "flags": int(flags, 16),
- })
-
- return sd
+ def get_sd(self, idx, security_info):
+ return self._connection.get_sd(self._open_files[idx]["fh"], security_info)
def inherit_acl(self, path, action):
cmd = [
@@ -395,4 +353,3 @@ def inherit_acl(self, path, action):
cl = subprocess.run(cmd, capture_output=True)
if cl.returncode != 0:
raise RuntimeError(cl.stdout.decode() or cl.stderr.decode())
-
diff --git a/tests/runtest.py b/tests/runtest.py
index 4954431f66f8b..b537ff7c26a14 100755
--- a/tests/runtest.py
+++ b/tests/runtest.py
@@ -48,7 +48,6 @@
--vm-name - Name of the Bhyve VM
--ha - Run test for HA
--ha_license - The base64 encoded string of an HA license
- --debug-mode - Start API tests with middleware debug mode
--isns_ip <###.###.###.###> - IP of the iSNS server (default: {isns_ip})
--pool - Name of the ZFS pool (default: {pool_name})
""" % argv[0]
@@ -69,7 +68,6 @@
"ha",
"update",
"dev-test",
- "debug-mode",
"log-cli-level=",
"returncode",
"isns_ip=",
@@ -93,7 +91,6 @@
testexpr = None
ha = False
update = False
-debug_mode = False
verbose = 0
exitfirst = ''
returncode = False
@@ -128,8 +125,6 @@
hostname = arg
elif output == '--update':
update = True
- elif output == '--debug-mode':
- debug_mode = True
elif output == '-v':
verbose += 1
elif output == '-x':
@@ -260,7 +255,6 @@ def get_ipinfo(ip_to_use):
ha = {ha}
ha_license = "{ha_license}"
update = {update}
-debug_mode = {debug_mode}
artifacts = "{artifacts}"
isns_ip = "{isns_ip}"
"""
diff --git a/tests/unit/test_copytree.py b/tests/unit/test_copytree.py
index 433038a3a9411..e7d8e1adbe69e 100644
--- a/tests/unit/test_copytree.py
+++ b/tests/unit/test_copytree.py
@@ -4,7 +4,6 @@
import pytest
import random
import stat
-import subprocess
from middlewared.utils.filesystem import copy
from operator import eq, ne
@@ -76,11 +75,19 @@ def create_test_data(target: str, symlink_target_path) -> None:
Basic tree of files and directories including some symlinks
"""
- os.mkdir(os.path.join(target, 'SOURCE'))
- create_test_files(os.path.join(target, 'SOURCE'), symlink_target_path)
+ source = os.path.join(target, 'SOURCE')
+ os.mkdir(source)
+
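+ # Stamp the SOURCE root itself with xattrs, ownership and mode (and,
+ # further below, explicit atime/mtime) so validate_copy_tree can check
+ # that metadata is preserved on the tree root, not just its children.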
+ for xat_name, xat_data in TEST_DIR_XATTRS:
+ os.setxattr(source, xat_name, xat_data)
+
+ os.chown(source, JENNY + 10, JENNY + 11)
+ os.chmod(source, 0o777)
+
+ create_test_files(source, symlink_target_path)
for dirname in TEST_DIRS:
- path = os.path.join(target, 'SOURCE', dirname)
+ path = os.path.join(source, dirname)
os.mkdir(path)
os.chmod(path, 0o777)
os.chown(path, JENNY, JENNY)
@@ -102,6 +109,8 @@ def create_test_data(target: str, symlink_target_path) -> None:
create_test_files(path, os.path.join(target, dirname))
os.utime(path, ns=(JENNY + 3, JENNY + 4))
+ os.utime(source, ns=(JENNY + 5, JENNY + 6))
+
@pytest.fixture(scope="function")
def directory_for_test(tmpdir):
@@ -249,7 +258,6 @@ def validate_the_things(
def validate_copy_tree(
src: str,
dst: str,
-
flags: copy.CopyFlags
):
with os.scandir(src) as it:
@@ -264,6 +272,8 @@ def validate_copy_tree(
if f.is_dir() and not f.is_symlink():
validate_copy_tree(new_src, new_dst, flags)
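+ # After recursing into the children, validate the directory itself so
+ # metadata on each tree root is checked as well.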
+ validate_the_things(src, dst, flags)
+
def test__copytree_default(directory_for_test, fd_count):
""" test basic behavior of copytree """
@@ -293,7 +303,7 @@ def test__copytree_exclude_ctldir(directory_for_test, fd_count, is_ctldir):
snapdir = os.path.join(src, '.zfs', 'snapshot', 'now')
os.makedirs(snapdir)
- with open(os.path.join(snapdir, 'canary'), 'w') as f:
+ with open(os.path.join(snapdir, 'canary'), 'w'):
pass
if is_ctldir:
diff --git a/tests/unit/test_fenced.py b/tests/unit/test_fenced.py
new file mode 100644
index 0000000000000..611e1fe860f70
--- /dev/null
+++ b/tests/unit/test_fenced.py
@@ -0,0 +1,30 @@
+import pytest
+
+from fenced.main import parse_ed
+from fenced.utils import load_disks_impl
+
+
+@pytest.mark.parametrize(
+ "exclude,expected",
+ [
+ ("sda,", ("sda",)),
+ ("", ()),
+ ("sda,sdb", ("sda", "sdb")),
+ ("sda, sdb", ("sda", "sdb")),
+ ("sda,sdb sdc", ("sda", "sdb", "sdc")),
+ ("sda sdb sdc", ("sda", "sdb", "sdc")),
+ ("sda sdb sdc", ("sda", "sdb", "sdc")),
+ ],
+)
+def test_parse_ed(exclude, expected):
+ assert parse_ed(exclude) == expected
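+# A minimal sketch of the behavior the cases above pin down (the real
+# parse_ed lives in fenced.main and may be implemented differently):
+#
+#     def parse_ed(exclude: str) -> tuple[str, ...]:
+#         # split on commas and any run of whitespace, dropping empties
+#         return tuple(exclude.replace(',', ' ').split())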
+
+
+@pytest.mark.parametrize("exclude", [tuple(), ("sda"), ("sda,sdb")])
+def test_load_disks(exclude):
+ """We need to make sure that fenced always enumerates
+ a list of disks."""
+ disks = load_disks_impl(exclude)
+ assert disks
+ for disk in exclude:
+ assert disk not in disks